blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M โ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2f60424443851a68456f041d72edb87239c0e99 | 241b898f62c427c985cb46b17a5ff5321c100d01 | /validateunderlay.py | 60e4fb27fae4eb0d2b2ae4d6faea85931f2f89eb | [] | no_license | rosia/dcloud-auto-script | c1861068522ce39f7770befd062c2a7aa631398e | 5784758eec312f79816cdded503f041f98628106 | refs/heads/main | 2023-04-22T20:54:51.521994 | 2021-05-04T07:02:49 | 2021-05-04T07:02:49 | 324,029,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | import paramiko
import paramiko
from paramiko import SSHClient
#
# Map of switch hostname -> management IP address.
switches = {'Sharedservices':'198.18.128.100', 'Fusion':'198.18.128.101', 'Core':'198.18.128.103',
            'Edge1':'198.18.128.102', 'Edge2':'198.18.128.104'}

# Underlay-validation commands to run on every switch. The original script
# repeated an identical connect/exec/print stanza once per command; running
# them from a list removes the duplication and reuses one SSH session.
COMMANDS = [
    "show ip ospf neighbor",
    "show ip ospf int bri",
    "show ip int bri",
    "show spanning-tree vlan 102",
    "ping 198.18.129.100",
]

client = SSHClient()
# For loop through the dictionary KV pairs
for switchname, switchip in switches.items():
    print('Logging into switch ' + switchname)
    try:
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # Auto accepts SSH host keys from devices
        # SECURITY: credentials are hardcoded (lab environment); in production
        # they should come from configuration or a secrets store.
        client.connect(switchip, username='dnacadmin', password='C1sco12345', timeout=5)
        for command in COMMANDS:
            ssh_stdin, ssh_stdout, ssh_stderr = client.exec_command(command)
            print('')  # blank line before each command's output, as before
            for line in ssh_stdout.readlines():
                print(line.rstrip())
    except Exception as error_message:
        print("Unable to connect")
        print(error_message)
    finally:
        # Bug fix: always close the session, even when connect/exec raises,
        # so a failed switch does not leak the transport into the next one.
        client.close()
| [
"rosia@cisco.com"
] | rosia@cisco.com |
1c4d2891a75da6332a6c50325e24b19b854723a3 | 53a9b20319d3c4a8819a3d7e49241263754f031e | /markpad/config.py | 0c59a8d8ec0dfb1094b48d96d438c80775fab0dc | [
"MIT"
] | permissive | ololduck/markpad | a1a51b2b4e8b535e4122b3c6797dfa8a51ae7517 | 4214d543cd27c5bcfa5a5805890b151f57eb276c | refs/heads/master | 2021-12-25T20:59:16.591084 | 2013-11-02T15:09:40 | 2013-11-02T15:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import os
# Absolute path of the package directory; used to locate the dev SQLite file.
basedir = os.path.abspath(os.path.dirname(__file__))

# Markdown extensions enabled when rendering pad contents.
MARKDOWN_EXTS = [
    'extra',
    'nl2br',
    'wikilinks',
    'headerid',
    'codehilite',
    'admonition'
]

# Use DATABASE_URL from the environment when deployed; fall back to a local
# SQLite file next to the package for development.
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL",
    'sqlite:///' + os.path.join(basedir, 'markpad.db'))
# SQLite needs special-casing elsewhere, so record which backend is in use.
# (Direct boolean assignment replaces the original if/else block.)
IS_SQLITE = 'DATABASE_URL' not in os.environ

# SECURITY NOTE: the fallback secret key is only suitable for development;
# always set SECRET_KEY in the environment for production deployments.
SECRET_KEY = os.environ.get("SECRET_KEY", 'super-secret-of-death')
HOST_URL = 'localhost:5000'
| [
"contact@paulollivier.fr"
] | contact@paulollivier.fr |
d3803c693837af821705394d60822fd89719a8cc | 55ea654eb5227819a7a2b7629f89caea164d48c1 | /2018/06.01/Send_Mail.py | 404813976dd44a93ed5bde21164fc5d7e1360386 | [
"MIT"
] | permissive | Yokan-Study/study | 63161ae508943a750782be17f5df225543cdfdfc | 0aaee83b21e67301435ace7c71dd25fc1b9be7b4 | refs/heads/master | 2022-12-10T00:54:09.807867 | 2018-08-14T02:25:08 | 2018-08-14T02:25:08 | 107,958,865 | 1 | 5 | MIT | 2022-12-07T23:50:56 | 2017-10-23T09:15:22 | JavaScript | UTF-8 | Python | false | false | 20,519 | py | import pystache, requests, smtplib
import pystache.defaults
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
headers = {'Authorization': 'Basic {key}'}
text = '''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=euc-kr">
<title></title>
</head>
<body style="margin:0; padding:0;">
<table width="720" border="0" cellpadding="0" cellspacing="0" style="margin:0 auto;">
<tr>
<td style="background:#fff;">
<!-- Top -->
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td style="padding:30px 40px 25px; font-size:1px; line-height:1px; border-bottom:2px #e0e0e0 solid;"> <a href="https://www.gabia.com/?utm_source=ems&utm_medium=email&utm_term=gabia&utm_campaign=notice&utm_content=CI" target="_blank" title="์์ฐฝ"><img src="http://static.gabia.com/mail/common/logo_gabia_2016.png" alt="gabia" style="vertical-align:top; border:none;" /></a> </td>
</tr>
</table>
<!-- //Top -->
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td style="width:40px;"></td>
<td style="padding:40px 0 50px;">
<!-- +++++++++++++++++++++++++++++++ ์ปจํ
์ธ ๋ด์ฉ +++++++++++++++++++++++++++++++ -->
<!-- +++++++ Contents ++++++++ -->
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td style="font:30px/40px Malgun Gothic; letter-spacing:-1px; color:#0879c9;"> [๊ฐ๋น์] ์๋น์ค ์ด์ฉ ๊ธฐ๊ฐ์ ์ฐ์ฅํด ์ฃผ์ธ์ </td>
</tr>
<!-- [ver3] ์ธ์ฌ๋ง ๋ชจ๋ ์์! -->
<!-- ๋ชจ๋ ์ค๋ช
: ์๋
ํ์ธ์. {hanname}๋ -->
<tr>
<td style="padding:30px 0 25px; font:16px/26px Malgun Gothic; color:#767676;"> ์๋
ํ์ธ์. {hanname} ๋ </td>
</tr>
<!-- [ver3] ์ธ์ฌ๋ง ๋ชจ๋ ๋ -->
<tr>
<td style="padding-bottom:10px; font:16px/26px Malgun Gothic; color:#767676;"> ์ด์ฉ ์ค์ธ ์๋น์ค์ ๋ง๊ธฐ์ผ์ด ๊ณง ๋ค๊ฐ์ต๋๋ค.<br /> ๋ง๊ธฐ ํ์๋ ์๋น์ค ์ด์ฉ์ด ์ ํ๋ ์ ์์ผ๋, ์๋ ๋ด์ฉ์ ํ์ธํ์๊ณ <br /> ์ฐ์ฅ ๊ฐ๋ฅ ๊ธฐํ ๋ด์ ์๋น์ค๋ฅผ ์ฐ์ฅํด ์ฃผ์ธ์.<br /><br /> </td>
</tr>
<tr>
<td style="font:14px/22px Malgun Gothic; color:#666;">
<!-- -->
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:30px;"></td>
</tr>
</table>
<table border="0" cellspacing="0" cellpadding="0" style="width:100%; *width:auto; table-layout:fixed;">
<tr>
<td style="width:20px; font-size:1px; line-height:16px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</tbody>
</table> <img src="http://static.gabia.com/mail/common/bul_2016_01.png" alt="" style="vertical-align:top;"> </td>
<td style="vertical-align:top; font:18px/25px Malgun Gothic; letter-spacing:-1px; color:#0879c9;">
๋์ ์๋น์ค </td>
<td style="font:14px/20px Malgun Gothic; color:#4b5964; text-align:right;"> * ์์ฑ์ผ: {regist_date} </td>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:10px;"></td>
</tr>
</table>
<table cellpadding="0" cellspacing="0" style="width:100%;">
<tr>
<td style="background:#a5a5a5; font-size:1px; line-height:1px; height:2px;"></td>
</tr>
</table>
<table cellpadding="0" cellspacing="0" style="width:100%; *width:auto; table-layout:fixed; border-bottom:1px #e0e0e0 solid; word-break:break-all;">
<tr>
<th style="padding:8px 0; font:bold 14px/20px Malgun Gothic; color:#4b5964; letter-spacing:-1px; color:#4b5964; text-align:center; background:#f3f3f3; "> ์๋น์ค </th>
<th width="110" style="padding:8px 0; font:bold 14px/20px Malgun Gothic; color:#4b5964; letter-spacing:-1px; color:#4b5964; text-align:center; background:#f3f3f3; border-left:1px #e0e0e0 solid;">
๋ง๊ธฐ์ผ </th>
<th width="110" style="padding:8px 0; font:bold 14px/20px Malgun Gothic; color:#4b5964; letter-spacing:-1px; color:#4b5964; text-align:center; background:#f3f3f3; border-left:1px #e0e0e0 solid;">
๊ธฐ์ค ๊ธ์ก </th>
<th width="110" style="padding:8px 0; font:bold 14px/20px Malgun Gothic; color:#4b5964; letter-spacing:-1px; color:#4b5964; text-align:center; background:#f3f3f3; border-left:1px #e0e0e0 solid;">
์ฐ์ฅ ๊ฐ๋ฅ ๊ธฐํ </th>
</tr>{#service_list}
<tr>
<td style="padding:8px 5px; font:14px/20px Malgun Gothic; color:#4b5964; border-top:1px #e0e0e0 solid; text-align:left;"> {service_name}<br /> <span style="font-size:13px;">{domain}</span> </td>
<td style="padding:8px 5px; font:14px/20px Malgun Gothic; color:#4b5964; border-top:1px #e0e0e0 solid; border-left:1px #e0e0e0 solid; text-align:center;">
{expiration_date} </td>
<td style="padding:8px 5px; font:14px/20px Malgun Gothic; color:#4b5964; border-top:1px #e0e0e0 solid; border-left:1px #e0e0e0 solid; text-align:right;"> {extension_expense}/{extension_period} </td>
<td style="padding:8px 5px; font:14px/20px Malgun Gothic; color:#4b5964; border-top:1px #e0e0e0 solid; border-left:1px #e0e0e0 solid; text-align:center;"> <b style="color:#f00;">{extendable_limit}</b>์ผ ๋จ์ </td>
</tr>{/service_list}</table>
<table cellpadding="0" cellspacing="0" style="width:100%; *width:auto; table-layout:fixed; border-bottom:1px #a5a5a5 solid; word-break:break-all;">
<tr>
<td style="text-align:right; padding:10px; font:bold 16px/1.2em Malgun Gothic; color:#000; background:#f8f8f8"> ์ด {total_count} ๊ฑด </td>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:25px;"></td>
</tr>
</table>
<div style="text-align:center;">
<!-- <a href="https://www.gabia.com/mygabia/extend" style="display:inline-block;vertical-align:top;padding:8px 30px 10px;color:#666;font:12px/14px Malgun Gothic;text-decoration:none;border:1px solid #cdcdcd;background:#f6f6f6" target="blank" title="์์ฐฝ">์๋น์ค ์ฐ์ฅํ๊ธฐ <img src="https://static.gabia.com/mail/common/icon_btn_arrow.png" alt="" style="padding-left:15px;vertical-align:middle; border:none;"></a> --><a href="https://www.gabia.com/mygabia/extend?utm_source=gabia&utm_medium=email&utm_term=%EC%84%9C%EB%B9%84%EC%8A%A4%EC%97%B0%EC%9E%A5%ED%95%98%EA%B8%B0&utm_campaign=%EA%B3%B5%ED%86%B5" target="_blank" title="์์ฐฝ"
rel="noopener"><img src="http://static.gabia.com/mail/2018/ems/btn_ems_04.png" alt="์๋น์ค ์ฐ์ฅํ๊ธฐ"></a> </div>
<!-- -->
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:30px;"></td>
</tr>
</table>
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td style="width:20px; font-size:1px; line-height:16px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</table> <img src="http://static.gabia.com/mail/common/bul_2016_01.png" alt="" style="vertical-align:top;"> </td>
<td style="vertical-align:top; font:18px/25px Malgun Gothic; letter-spacing:-1px; color:#0879c9;">
์ ์์ฌํญ </td>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:10px;"></td>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr>
<td style="width:16px; font-size:1px; line-height:7px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</table> <img src="http://static.gabia.com/mail/common/renew_edm_bul01.gif" alt="" style="vertical-align:top; margin-left:2px;"> </td>
<td style="padding-bottom:5px; vertical-align:top; font:14px/20px Malgun Gothic; color:#666;">
๋ณธ ๋ฉ์ผ์ {regist_date}์ ์์ฑ๋์์ผ๋ฏ๋ก, ์ด๋ฏธ ์๋น์ค๋ฅผ ์ฐ์ฅํ์ ๋ถ์ ๋ค์ ์ฐ์ฅํ์ง ์์๋ ๋ฉ๋๋ค. </td>
</tr>
<tr>
<td style="width:16px; font-size:1px; line-height:7px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</table> <img src="http://static.gabia.com/mail/common/renew_edm_bul01.gif" alt="" style="vertical-align:top; margin-left:2px;"> </td>
<td style="padding-bottom:5px; vertical-align:top; font:14px/20px Malgun Gothic; color:#666;">
๋ง๊ธฐ์ผ์ด ์ง๋๋ฉด ๋๋ฉ์ธ ์ฌ์ฉ์ด ์ ํ๋๋ฉฐ, ์ฌ์ฉ์ด ์ ํ๋ ์ดํ์ ๋๋ฉ์ธ์ ์ฐ์ฅํ๋ฉด ์ ์์ ์ผ๋ก ์๋น์ค๋ฅผ ์ด์ฉํ๊ธฐ๊น์ง 1 ~ 3์ผ์ ๋ ์์๋ ์ ์์ต๋๋ค. </td>
</tr>
<tr>
<td style="width:16px; font-size:1px; line-height:7px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</table> <img src="http://static.gabia.com/mail/common/renew_edm_bul01.gif" alt="" style="vertical-align:top; margin-left:2px;"> </td>
<td style="padding-bottom:5px; vertical-align:top; font:14px/20px Malgun Gothic; color:#666;">
๋๋ฉ์ธ ์ญ์ ์ผ์ด ์๋ฐํ์ฌ ์ฐ์ฅ๋ ๊ฑด์ ๋ํด์๋ ๋คํธ์ํฌ, ์ํ์ฌ ํต์ ๋ฑ์ ๋ฌธ์ ๋ก ์ฐ์ฅ์ด ์ ๋ ์ ์์ต๋๋ค. ์ด์ ๋ํด์๋ ๊ฐ๋น์์์ ์ฑ
์์ ์ง์ง ์์ผ๋, ๋ฏธ๋ฆฌ ์ฐ์ฅํด ์ฃผ์๊ธฐ ๋ฐ๋๋๋ค. </td>
</tr>
<tr>
<td style="width:16px; font-size:1px; line-height:7px; vertical-align:top;">
<table border="0" cellpadding="0" cellspacing="0">
<tr>
<td style="display:block; height:8px;"></td>
</tr>
</table> <img src="http://static.gabia.com/mail/common/renew_edm_bul01.gif" alt="" style="vertical-align:top; margin-left:2px;"> </td>
<td style="padding-bottom:5px; vertical-align:top; font:14px/20px Malgun Gothic; color:#666;">
๋๋ฉ์ธ ์ข
๋ฅ์ ๋ฐ๋ผ <strong>๋ง๊ธฐ์ผ ์ ์ ์ญ์ ๋ ์๋ ์์ต๋๋ค</strong>. ์ฐ์ฅ ๊ฐ๋ฅ ๊ธฐํ ๋ด์ ์ฐ์ฅํด์ผ ๋๋ฉ์ธ์ด ์ญ์ ๋์ง ์์ต๋๋ค. </td>
</tr>
</table>
</td>
<td style="width:40px;"></td>
</table>
<!-- Footer -->
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td style="padding:40px; border-top:2px #e0e0e0 solid; font:14px/23px Malgun Gothic; color:#767676;"> โป ์ด ๋ฉ์ผ์ ๋ฐ์ ์ ์ฉ์
๋๋ค. </td>
</tr>
<tr>
<td style="padding:24px; border-top:1px #e0e0e0 solid; border-bottom:1px #e0e0e0 solid; font:12px/12px Malgun Gothic; color:#ccc; text-align:center;"> <a href="https://company.gabia.com/?utm_source=ems&utm_medium=email&utm_term=footer&utm_campaign=notice&utm_content=%ED%9A%8C%EC%82%AC%EC%86%8C%EA%B0%9C" style="color:#767676; text-decoration:none;" target="_blank"
title="์์ฐฝ">ํ์ฌ์๊ฐ</a> | <a href="https://www.gabia.com/agreements/index.php?utm_source=ems&utm_medium=email&utm_term=footer&utm_campaign=notice&utm_content=%EC%95%BD%EA%B4%80" style="color:#767676; text-decoration:none;"
target="_blank" title="์์ฐฝ">์ฝ๊ด</a> | <a href="https://www.gabia.com/privacy_policy?utm_source=ems&utm_medium=email&utm_term=footer&utm_campaign=notice&utm_content=%EA%B0%9C%EC%9D%B8%EC%A0%95%EB%B3%B4%EC%B7%A8%EA%B8%89%EB%B0%A9%EC%B9%A8"
style="color:#767676; text-decoration:none;" target="_blank" title="์์ฐฝ">๊ฐ์ธ์ ๋ณด์ฒ๋ฆฌ๋ฐฉ์นจ</a> | <a href="https://customer.gabia.com/?utm_source=ems&utm_medium=email&utm_term=footer&utm_campaign=notice&utm_content=%EA%B3%A0%EA%B0%9D%EC%84%BC%ED%84%B0"
style="color:#767676; text-decoration:none;" target="_blank" title="์์ฐฝ">๊ณ ๊ฐ์ผํฐ</a> </td>
</tr>
<tr>
<td style="padding:30px 10px; font:12px/20px Malgun Gothic; color:#767676; text-align:center;"> (์ฃผ)๊ฐ๋น์ ๊ฒฝ๊ธฐ๋ ์ฑ๋จ์ ๋ถ๋น๊ตฌ ๋์ํ๊ต๋ก 660, B๋ 4์ธต(์ผํ๋)<br> ๋ํ์ ํ 1544-4370 ๋ฉ์ผ/๋น/์ด๋ฌ๋ 1661-4370<br><br> โGabia Inc. All Rights Reserved. </td>
</tr>
</table>
<!-- //Footer -->
</td>
</tr>
</table>
</body>
</html>
'''
data = {
'hanname':'%ํ์์ด๋ฆ%',
'regist_date':'%์์ฑ์ผ%',
'service_list':[
{'service_name':'%์๋น์ค๋ช
%',
'domain':'%๋๋ฉ์ธ%',
'expiration_date':'%๋ง๊ธฐ์ผ%',
'extension_expense':'%๊ธฐ์ค ๊ฐ๊ฒฉ%',
'extension_period':'%๊ธฐ์ค ๊ธฐ๊ฐ%',
'extendable_limit':'%n%'
},
{'service_name':'%์๋น์ค๋ช
%',
'domain':'%๋๋ฉ์ธ%',
'expiration_date':'%๋ง๊ธฐ์ผ%',
'extension_expense':'%๊ธฐ์ค ๊ฐ๊ฒฉ%',
'extension_period':'%๊ธฐ์ค ๊ธฐ๊ฐ%',
'extendable_limit':'%n%'
},
{'service_name':'%์๋น์ค๋ช
%',
'domain':'%๋๋ฉ์ธ%',
'expiration_date':'%๋ง๊ธฐ์ผ%',
'extension_expense':'%๊ธฐ์ค ๊ฐ๊ฒฉ%',
'extension_period':'%๊ธฐ์ค ๊ธฐ๊ฐ%',
'extendable_limit':'%n%'
}],
'total_count':'3'
}
# NOTE(review): the placeholder '%...%' values in ``data`` should ultimately be
# loaded from the DB (looked up by account id / service number), and
# ``regist_date`` should default to today's (system) date.
# Use single-brace delimiters so the template's ``{placeholder}`` markers are
# expanded (pystache defaults to ``{{ }}``).
pystache.defaults.DELIMITERS = ('{', '}')
mail_text = pystache.render(text, data)
# print(mail_text)

smtp = smtplib.SMTP('smtp.gmail.com', 587)
try:
    smtp.ehlo()
    smtp.starttls()
    # SECURITY: credentials should come from configuration, not source code.
    smtp.login('jya9055@gmail.com', '{๋น๋ฐ๋ฒํธ}')
    msg = MIMEMultipart('alternative')
    msg.attach(MIMEText(mail_text, 'html'))
    msg['Subject'] = '[๊ฐ๋น์] ์๋น์ค ์ด์ฉ ๊ธฐ๊ฐ์ ์ฐ์ฅํด ์ฃผ์ธ์'
    msg['To'] = 'jya@gabia.com'
    smtp.sendmail('jya9055@gmail.com', 'jya@gabia.com', msg.as_string())
finally:
    # Bug fix: the original ended with ``smtp.quit`` (attribute access, never
    # called), so the SMTP session was never closed; call it in a finally.
    smtp.quit()
"jya9055@gmail.com"
] | jya9055@gmail.com |
ffa39f22831b11734d04b3e3eea7856437400115 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0401_0450/LeetCode422_ValidWordSquare.py | ee694477735ad1d5c38aa096a5f0bfdceae3713d | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 449 | py | '''
Created on Apr 13, 2017
@author: MT
'''
# NOTE(review): this file stores an *obfuscated* exercise template, not
# runnable Python. Keywords/builtins are abbreviated placeholders (e.g.
# ``c_`` for class, ``___`` for def/for, ``__`` for if/in, ``r..`` for
# return, ``T../F..`` for True/False) -- presumably for fill-in practice;
# confirm against the template generator before "fixing" the syntax.
# The underlying algorithm checks that a list of words forms a valid word
# square: row k must read the same as column k, returning False on any
# mismatch or out-of-range access and True otherwise.
c_ Solution(o..
    ___ validWordSquare words
        __ n.. words: r.. F..
        ___ i, word1 __ e..(words
            word2  ''
            ___ j __ r..(l..(word1:
                __ j >_ l..(words
                    r.. F..
                __ i >_ l..(words[j]
                    r.. F..
                word2 += words[j][i]
            __ word1 !_ word2:
                r.. F..
        r.. T..
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
8e8d6e02afe119d471e20a1ce2cf4091b144d836 | f045faa2ce09bebd4f878b1219fc4983587c8c79 | /flearn/models/femnist/cnn2.py | d180ebb0906dc9c036d8928bea56241b44227a69 | [] | no_license | eepLearning/federated-learning | 9db7babb9453fd20dc0dcac0202d4a806287754f | b647689bb035929f2661ebe3e8a3b0c94423bcf2 | refs/heads/master | 2023-01-12T19:13:56.108284 | 2020-08-06T15:35:56 | 2020-08-06T15:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,708 | py | import tensorflow as tf
import tqdm
import numpy as np
from flearn.models.base_model import BaseModel
class Model(BaseModel):
def __init__(self, num_classes, image_size, options, optimizer, seed=1):
# params
self.num_classes = num_classes
self.image_size = image_size
# ไฝฟ็จ mini-batch ็ๆฐ้
self.num_inner_steps = options['num_inner_steps']
self.batch_size = options['batch_size']
self.inner_lr = options['lr']
super(Model, self).__init__(optimizer=optimizer, seed=seed, options=options)
def create_conv_variables(self, kernel_size, in_dim, out_dim, conv_name, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d):
"""
ๅๅปบๅท็งฏๅฑ็ๅ้
:param kernel_size:
:param in_dim:
:param out_dim:
:param conv_name:
:param kernel_initializer:
:return:
"""
w = tf.get_variable(conv_name + '_w', [kernel_size, kernel_size, in_dim, out_dim], initializer=kernel_initializer())
b = tf.get_variable(conv_name + '_b', initializer=tf.zeros([out_dim]))
return (w, b)
def create_fc_variables(self, in_dim, out_dim, fc_name,
weight_initializer=tf.contrib.layers.xavier_initializer):
"""
ๅๅปบ dense ๅฑ็็ธๅ
ณๅ้
:param in_dim:
:param out_dim:
:param fc_name:
:param weight_initializer:
:return:
"""
w = tf.get_variable(fc_name + '_w', [in_dim, out_dim], initializer=weight_initializer())
b = tf.get_variable(fc_name + '_b', initializer=tf.zeros([out_dim]))
return (w, b)
def create_params(self):
"""
ๅๅปบ็ฝ่ทฏ็ๅๆฐ. ็ฝ็ป็ๅๆฐไฟๅญๅจ
:param input_channel:
:param kernel_size:
:return: ๅๆฐ dict: Dict[name] -> variable
"""
weights = {}
with tf.variable_scope('MAML', reuse=tf.AUTO_REUSE):
(weights['conv1w'], weights['conv1b']) = self.create_conv_variables(5, 1, 32, 'conv1')
(weights['conv2w'], weights['conv2b']) = self.create_conv_variables(5, 32, 64, 'conv2')
(weights['fc1w'], weights['fc1b']) = self.create_fc_variables(7 * 7 * 64, 2048, 'fc1')
(weights['fc2w'], weights['fc2b']) = self.create_fc_variables(2048, self.num_classes, 'fc2')
return weights
def conv_block(self, x, weight, bias, scope):
"""
build a block with conv2d->pooling. ๆๆถๅ ้ค batch_norm ็่ฎพ็ฝฎ
:param x: ่พๅ
ฅ็ๅผ ้
:param weight: conv2d ็ weight
:param bias: conv2d ็ bias
:param scope:
:return:
"""
# conv
x = tf.nn.conv2d(x, weight, [1, 1, 1, 1], 'SAME', name=scope + '_conv2d') + bias
x = tf.nn.relu(x, name=scope + '_relu')
# pooling
x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID', name=scope + '_pool')
return x
def fc_block(self, x, weight, bias, name, flatten=False, act=tf.nn.relu):
"""
ๅๅๆไฝ
:param x:
:param weight:
:param bias:
:param name:
:param flatten: ๆฏๅฆๆๅนณๅ่พๅ
ฅ
:param act: ่พๅบไนๅ็ๆฟๆดปๅฝๆฐ
:return:
"""
if flatten:
x = tf.reshape(x, [-1, np.prod([int(dim) for dim in x.get_shape()[1:]])], name=name + '_flatten')
x = tf.add(tf.matmul(x, weight), bias, name=name + '_out')
if act is not None:
x = act(x, name=name + '_act')
return x
def forward(self, x, weights):
"""
่พๅ
ฅๅฐ่พๅบ็ๅฎไน
:param x:
:param weights:
:return:
"""
hidden1 = self.conv_block(x, weights['conv1w'], weights['conv1b'], 'conv1')
hidden2 = self.conv_block(hidden1, weights['conv2w'], weights['conv2b'], 'conv2')
output = self.fc_block(hidden2, weights['fc1w'], weights['fc1b'], name='fc1', flatten=True)
output = self.fc_block(output, weights['fc2w'], weights['fc2b'], name='fc2', act=None, flatten=False)
return output
def create_model(self):
"""
ๅๅปบๅบๆฌไฝ ็ๆจกๅ
:param optimizer:
:return:
"""
support_features = tf.placeholder(tf.float32, shape=[self.num_inner_steps, self.batch_size, self.image_size * self.image_size], name='support_features')
query_features = tf.placeholder(tf.float32, shape=[self.num_inner_steps, self.batch_size, self.image_size * self.image_size], name='query_features')
# ่ฝฌๆขไธบๅผ ้
support_labels = tf.placeholder(tf.int64, shape=[self.num_inner_steps, self.batch_size], name='support_labels')
query_labels = tf.placeholder(tf.int64, shape=[self.num_inner_steps, self.batch_size], name='query_labels')
# ๅบไบ support, ่ฎก็ฎไธๆฌกๅๆฐ
self.weights = self.create_params()
def support_update(inputx):
# inputx: ็ฌฌไธไธช็ปดๅบฆไธบ batch_size
one_support_features_batch, one_support_label_batch = inputx
one_support_features_batch_reshaped = tf.reshape(one_support_features_batch, [-1, self.image_size, self.image_size, 1])
one_support_label_batch_onehot = tf.one_hot(one_support_label_batch, depth=self.num_classes)
# ๅฉ็จ็ฝ็ป่ฟ่กๅๅ
support_pred_logitis = self.forward(one_support_features_batch_reshaped, self.weights)
support_correct_count = tf.count_nonzero(
tf.equal(tf.argmax(one_support_label_batch_onehot, axis=1),
tf.argmax(tf.nn.softmax(support_pred_logitis, dim=1), axis=1)))
support_loss = tf.nn.softmax_cross_entropy_with_logits(logits=support_pred_logitis,
labels=one_support_label_batch_onehot)
support_loss_mean = tf.reduce_mean(support_loss)
# ่ฟ้่ฎก็ฎไธๆฌกๆขฏๅบฆ
grads = tf.gradients(support_loss_mean, list(self.weights.values()))
# ๆดๆฐๅฝๅ็็ฝ็ปๅๆฐ
gradients = dict(zip(self.weights.keys(), grads))
fast_weights = dict(
zip(self.weights.keys(), [self.weights[key] - self.inner_lr * gradients[key] for key in self.weights.keys()]))
# ๅฐ fast weight ๆดๆฐๅฐ weights
self.weights = fast_weights
return (support_loss_mean, support_correct_count)
def query_calc_loss(inputx):
# inputx: ็ฌฌไธไธช็ปดๅบฆไธบ batch_size
one_support_features_batch, one_support_label_batch = inputx
one_support_features_batch_reshaped = tf.reshape(one_support_features_batch, [-1, self.image_size, self.image_size, 1])
one_support_label_batch_onehot = tf.one_hot(one_support_label_batch, depth=self.num_classes)
# ๅฉ็จ็ฝ็ป่ฟ่กๅๅ
support_pred_logitis = self.forward(one_support_features_batch_reshaped, self.weights)
support_correct_count = tf.count_nonzero(
tf.equal(tf.argmax(one_support_label_batch_onehot, axis=1),
tf.argmax(tf.nn.softmax(support_pred_logitis, dim=1), axis=1)), dtype=tf.int64)
support_loss = tf.nn.softmax_cross_entropy_with_logits(logits=support_pred_logitis,
labels=one_support_label_batch_onehot)
support_loss_mean = tf.reduce_mean(support_loss)
# ่ฟ้่ฎก็ฎไธๆฌกๆขฏๅบฆ
grads = tf.gradients(support_loss_mean, list(self.weights.values()))
# # ๆดๆฐๅฝๅ็็ฝ็ปๅๆฐ
# gradients = dict(zip(self.weights.keys(), grads))
return (support_loss_mean, support_correct_count, grads)
# TODO ๆ ๆณๅจๅฆๅคไธไธช loop ไธญๅบ็จๅ
ๅ็ๅ้: https://www.shuzhiduo.com/A/q4zVZejWzK/
num_weights = len(self.weights)
output_shape = (tf.float32, tf.int64)
# ่ฟไธคไธชๅไธบๅ้, ้ฟๅบฆไธบๅพช็ฏ็ๆฌกๆฐ
sprt_losses, sprt_corrects = tf.map_fn(support_update, dtype=output_shape, elems=(support_features, support_labels), parallel_iterations=self.num_inner_steps)
output_shape = (tf.float32, tf.int64, [tf.float32] * num_weights)
qry_losses, qry_corrects, grads = tf.map_fn(query_calc_loss, dtype=output_shape,
elems=(query_features, query_labels),
parallel_iterations=self.num_inner_steps)
# ่ฟ้็ loss ้่ฆๅนณๅไธไธ, ๆ็
ง
return (support_features, query_features), (support_labels, query_labels), None, grads, qry_corrects, qry_losses
def create_model_bak(self):
"""
ๅๅปบๅบๆฌไฝ ็ๆจกๅ
:param optimizer:
:return:
"""
support_features = tf.placeholder(tf.float32, shape=[None, self.image_size * self.image_size], name='support_features')
query_features = tf.placeholder(tf.float32, shape=[None, self.image_size * self.image_size], name='query_features')
# ่ฝฌๆขไธบๅผ ้
support_input_layer = tf.reshape(support_features, [-1, self.image_size, self.image_size, 1], name='support_features_reshaped')
query_input_layer = tf.reshape(query_features, [-1, self.image_size, self.image_size, 1], name='query_features_reshaped')
support_labels = tf.placeholder(tf.int64, shape=[None], name='support_labels')
query_labels = tf.placeholder(tf.int64, shape=[None], name='query_labels')
support_labels_onehot = tf.one_hot(support_labels, depth=self.num_classes, name='support_labels_onehot')
query_labels_onehot = tf.one_hot(query_labels, depth=self.num_classes, name='query_labels_onehot')
# ๅบไบ support, ่ฎก็ฎไธๆฌกๅๆฐ
self.weights = self.create_params()
# self.adam_optimizer.create_momtems(self.weights)
###### ็ดๆฅๅฎไนๅๆฐ
######
support_pred_logitis = self.forward(support_input_layer, self.weights)
support_correct_count = tf.count_nonzero(
tf.equal(tf.argmax(support_labels_onehot, axis=1), tf.argmax(tf.nn.softmax(support_pred_logitis, dim=1), axis=1)))
support_loss = tf.nn.softmax_cross_entropy_with_logits(logits=support_pred_logitis, labels=support_labels_onehot)
# ่ฟไธช็จๆฅ้ช่ฏๆฏๅฆๆฑไบๅจquery้ถๆฎตๆฑไบไบ้ถๅฏผๆฐ, sparse ๆฒกๆไบ้ถๅฏผๆฐ็ๅฎ็ฐ. ๅฆๆๆฒกๆๆฅ้่ฏฏ, ่ฏดๆๆฒกๆๆฑๅพไบ้ถๅฏผๆฐ
# support_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=support_pred_logitis, labels=support_labels)
# theta' = theta - alpha * grads, ่ฟ้่ฝๅฆไฝฟ็จ adam?
# fast_weights = dict(zip(self.weights.keys(), [self.weights[key] - self.options['lr'] * gvs[key] for key in self.weights.keys()]))
####
# ่ฟ้็ loss ๆฏๅ้. ็ฐๅจๅฐฑๆฏๅธๆ่ฝๅคๆจกๆไธไธช Adam ็่ฟ็จ
support_loss_mean = tf.reduce_mean(support_loss)
grads = tf.gradients(support_loss_mean, list(self.weights.values()))
gvs = dict(zip(self.weights.keys(), grads))
fast_weights = dict(zip(self.weights.keys(), [self.weights[key] - self.options['lr'] * gvs[key] for key in self.weights.keys()]))
# train_op = self.optimizer.apply_gradients(adam_gvs)
####
# TODO ่ฟ็งๆนๅผ่กไธ้!! ๆ นๆฌๆฒกๆ่ฎก็ฎไบ้ถๅฏผๆฐ
# support_loss_mean = tf.reduce_mean(support_loss)
# adam_gvs = self.optimizer.compute_gradients(support_loss_mean)
# train_op = self.optimizer.apply_gradients(adam_gvs)
###
# # ๆฅ็ๆฏๅบไบ query
# query_pred = self.forward(query_input_layer, fast_weights)
# # ่ฎก็ฎๆๅคฑๅฝๆฐ L(f_theta'(D'))
# query_loss = tf.nn.softmax_cross_entropy_with_logits(logits=query_pred, labels=query_labels_onehot)
# # ๅบไบ่ฟไธช query ๅฎไนไผๅๅจ
# # gvs = self.optimizer.compute_gradients(query_loss)
# # train_op = self.optimizer.apply_gradients(gvs)
# # grads, _ = zip(*gvs)
#
# # eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))
# # return features, labels, train_op, grads, eval_metric_ops, loss
# second_order_grads = tf.gradients(query_loss, list(self.weights.values()))
# query_correct_count = tf.count_nonzero(
# tf.equal(tf.argmax(query_labels_onehot, axis=1), tf.argmax(tf.nn.softmax(query_pred, dim=1), axis=1)))
query_pred = self.forward(query_input_layer, fast_weights)
# ่ฎก็ฎๆๅคฑๅฝๆฐ L(f_theta'(D'))
query_loss = tf.nn.softmax_cross_entropy_with_logits(logits=query_pred, labels=query_labels_onehot)
# query_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=query_pred, labels=query_labels)
query_loss_mean = tf.reduce_mean(query_loss)
second_order_grads = tf.gradients(query_loss_mean, list(self.weights.values()))
query_correct_count = tf.count_nonzero(
tf.equal(tf.argmax(query_labels_onehot, axis=1), tf.argmax(tf.nn.softmax(query_pred, dim=1), axis=1)))
return (support_features, query_features), (support_labels, query_labels), None, second_order_grads, (support_correct_count, query_correct_count), (support_loss_mean, query_loss_mean)
def solve_sgd_meta_one_batch(self, sp, qr):
"""
่ฟ่กไธๆฌก SGD
:param mini_batch_data:
:return:
"""
self.adam_optimizer.increase_n()
with self.graph.as_default():
grads, loss = self.sess.run([self.grads, self.loss],
feed_dict={self.features[0]: sp[0],
self.features[1]: qr[0],
self.labels[0]: sp[1],
self.labels[1]: qr[1]})
sz = len(sp[1]) + len(qr[1])
comp = sz * self.flops
return grads, loss, comp, sz
def solve_sgd_meta_full_data(self, sp, qr):
"""
่ฟ่กไธๆฌก SGD
:param mini_batch_data:
:return:
"""
self.adam_optimizer.increase_n()
with self.graph.as_default():
grads, loss = self.sess.run([self.grads, self.loss],
feed_dict={self.features[0]: sp['x'],
self.features[1]: qr['x'],
self.labels[0]: sp['y'],
self.labels[1]: qr['y']})
sz = len(sp['y']) + len(qr['y'])
comp = sz * self.flops
return grads, loss, comp, sz
def test_meta(self, sp, qr):
all_x = np.concatenate((sp['x'], qr['x']), axis=0)
all_y = np.concatenate((sp['y'], qr['y']), axis=0)
with self.graph.as_default():
# tot_correct, loss = self.sess.run([self.eval_metric_ops, self.loss],
# feed_dict={self.features[0]: sp[0],
# self.features[1]: qr[0],
# self.labels[0]: sp[1],
# self.labels[1]: qr[1]})
tot_correct, loss = self.sess.run([self.eval_metric_ops[0], self.loss[0]],
feed_dict={self.features[0]: all_x,
self.labels[0]: all_y})
return tot_correct, loss
| [
"wangshu214@live.cn"
] | wangshu214@live.cn |
73cde5a4de955ce5c4d0711c334f2238ee76c8f0 | 6cef66753d9f4ddd66174ea5669ec859a22c7806 | /setup.py | 4e9f4dc5eb87ca8743856a1d269ab587a0d8db43 | [] | no_license | Deli-Slicer/CatBurglar | 3948922a177d1d521a72320781b1d1c09104c59b | 6c952cc4ceba4cce552f5a26151f31eb07baa72a | refs/heads/main | 2023-04-08T12:47:39.232056 | 2021-04-04T05:47:15 | 2021-04-04T05:47:15 | 352,229,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from setuptools import setup, find_packages
# Runtime dependencies for the game.
install_requires = ['arcade>=2.5.6']

# Read the long description from the README. Bug fix: specify the encoding
# explicitly so the build does not fail on platforms whose default locale
# encoding (e.g. cp1252) cannot decode non-ASCII characters in the README.
with open("README.md", "r", encoding="utf-8") as longfile:
    long_description = longfile.read()

setup(
    name='CatBurglar',
    version='0.0.1',
    install_requires=install_requires,
    packages=find_packages(),
    long_description=long_description,
    long_description_content_type="text/markdown",
    entry_points={
        "console_scripts": [
            # ``catburglar`` command launches the game's main() entry point.
            'catburglar=CatBurglar.main:main'
        ]
    },
    python_requires='>=3.7'
)
"36696816+pushfoo@users.noreply.github.com"
] | 36696816+pushfoo@users.noreply.github.com |
be55ff4fce3f0b3de5b3ef9bcc3905593fd75c05 | c48d63562727404de8679351122e060e723f7af3 | /program20.py | 66e75763a4a1897f08c53166b6eb354320ec5583 | [] | no_license | stephinsr/codekata | 04352cb96599186184fdb530bbfba8226dbd5cfa | 99a078e68904370bd175f97b5ce98b8984ab5e64 | refs/heads/master | 2020-04-21T16:08:20.759242 | 2019-02-19T08:49:48 | 2019-02-19T08:49:48 | 169,690,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py |
c=int(raw_input())
for i in range(1,6):
print(i*c),
| [
"noreply@github.com"
] | noreply@github.com |
76ab8b67c75a9a98e5ba698b432ab49c43cc137c | 189d75fa820ee1549e8ca90687e9a1711d9f8ded | /BinaryTree.py | 0753a7dccf7ab2489afb698ef589de0e9d1eeda0 | [] | no_license | TijanaSekaric/SP-Homework07 | f513dedcfe63842e1cc8001c0807983962154605 | bf84a89df32520b453db5694680f9d4dd5a72bca | refs/heads/master | 2020-04-11T09:15:14.462812 | 2018-12-13T18:16:31 | 2018-12-13T18:16:31 | 161,670,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py |
from Tree import Tree
class BinaryTree(Tree):
def left(self, p):
raise NotImplementedError( 'must be implemented by subclass' )
def right(self, p):
raise NotImplementedError( 'must be implemented by subclass' )
def sibling(self, p):
parent = self.parent(p)
if parent is None:
return None
else:
if p == self.left(parent):
return self.right(parent)
else:
return self.left(parent)
def children(self, p):
if self.left(p) is not None:
yield self.left(p)
if self.right(p) is not None:
yield self.right(p) | [
"tijana.sekaric@udg.edu.me"
] | tijana.sekaric@udg.edu.me |
05c4b8df052bc124bbaab8e69c5bf48121ecea0f | aa3cc4eb303c4dccc9ab28bea8fb476b18ad0305 | /problems_2022/22.py | 2d961e10b7e8192c9715efd269cc386e73a9eaa1 | [] | no_license | alexander-yu/adventofcode | 855cc8db5f47fc53a71f3cc9ec81d255847dabdc | 25268c3906d07761b149c2710362c8b80869d778 | refs/heads/master | 2023-01-12T05:07:56.360504 | 2022-12-30T19:45:39 | 2022-12-30T19:45:39 | 227,441,688 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,784 | py | import collections
import re
from utils import Vector2D
import utils
def get_data():
*points, _, path = utils.get_input(cast=str, delimiter=None)
start, grid, points_by_x, points_by_y = get_grid(points)
path = [
int(move) if move.isnumeric() else move
for move in re.findall(r'\d+|L|R', path)
]
return start, grid, path, points_by_x, points_by_y
def get_grid(point_rows):
points = {}
n_rows = 0
n_columns = 0
start = None
points_by_x = collections.defaultdict(list)
points_by_y = collections.defaultdict(list)
for i, row in enumerate(point_rows):
n_rows = i + 1
for j, value in enumerate(row):
n_columns = j + 1
if value != ' ':
point = Vector2D(j, -i)
points_by_x[j].append(point)
points_by_y[-i].append(point)
points[point] = value
if not start:
start = point
return start, utils.Grid(points, n_rows, n_columns), points_by_x, points_by_y
def get_password(point, direction):
x, y = point
row = -y + 1
col = x + 1
facing = {
(1, 0): 0,
(0, -1): 1,
(-1, 0): 2,
(0, 1): 3,
}[direction]
return 1000 * row + 4 * col + facing
@utils.part
def part_1():
start, grid, path, points_by_x, points_by_y = get_data()
direction = Vector2D(1, 0)
curr = start
for move in path:
match move:
case 'L': direction = direction.rot90(1)
case 'R': direction = direction.rot90(3)
case _:
for _ in range(move):
new = curr + direction
if new not in grid:
match direction.sign():
case 1, _: new = points_by_y[new[1]][0]
case -1, _: new = points_by_y[new[1]][-1]
case _, 1: new = points_by_x[new[0]][-1]
case _, -1: new = points_by_x[new[0]][0]
if grid[new] == '#':
break
curr = new
print(get_password(curr, direction))
FACES = {
(1, 0): 1,
(2, 0): 2,
(1, 1): 3,
(0, 2): 4,
(1, 2): 5,
(0, 3): 6,
}
def get_face(point):
x, y = point
x, y = x // 50, -y // 50
return FACES[x, y]
WRAPS = {
(1, 0, 1): lambda x, y, points_by_x, points_by_y: (points_by_y[-150 - x][0], Vector2D(1, 0)),
(1, -1, 0): lambda x, y, points_by_x, points_by_y: (points_by_y[-150 + y + 1][0], Vector2D(1, 0)),
(2, 0, 1): lambda x, y, points_by_x, points_by_y: (points_by_x[x][-1], Vector2D(0, 1)),
(2, 1, 0): lambda x, y, points_by_x, points_by_y: (points_by_y[-150 + y][-1], Vector2D(-1, 0)),
(2, 0, -1): lambda x, y, points_by_x, points_by_y: (points_by_y[-50 - x][-1], Vector2D(-1, 0)),
(3, -1, 0): lambda x, y, points_by_x, points_by_y: (points_by_x[y][0], Vector2D(0, -1)),
(3, 1, 0): lambda x, y, points_by_x, points_by_y: (points_by_x[100 + y][-1], Vector2D(0, 1)),
(4, -1, 0): lambda x, y, points_by_x, points_by_y: (points_by_y[-50 + y + 1][0], Vector2D(1, 0)),
(4, 0, 1): lambda x, y, points_by_x, points_by_y: (points_by_y[-50 - x][0], Vector2D(1, 0)),
(5, 1, 0): lambda x, y, points_by_x, points_by_y: (points_by_y[-50 + y][-1], Vector2D(-1, 0)),
(5, 0, -1): lambda x, y, points_by_x, points_by_y: (points_by_y[-150 - x][-1], Vector2D(-1, 0)),
(6, -1, 0): lambda x, y, points_by_x, points_by_y: (points_by_x[50 + y][0], Vector2D(0, -1)),
(6, 1, 0): lambda x, y, points_by_x, points_by_y: (points_by_x[50 + y][-1], Vector2D(0, 1)),
(6, 0, -1): lambda x, y, points_by_x, points_by_y: (points_by_x[100 + x][0], Vector2D(0, -1)),
}
def wrap(point, direction, points_by_x, points_by_y):
face = get_face(point)
x, y = point
x, y = x % 50, 50 - (y % 50)
return WRAPS[face, *direction.sign()](x, y, points_by_x, points_by_y)
@utils.part
def part_2():
start, grid, path, points_by_x, points_by_y = get_data()
direction = Vector2D(1, 0)
curr = start
for move in path:
match move:
case 'L': direction = direction.rot90(1)
case 'R': direction = direction.rot90(3)
case _:
for _ in range(move):
new = curr + direction
if new not in grid:
new, new_direction = wrap(curr, direction, points_by_x, points_by_y)
else:
new_direction = direction
if grid[new] == '#':
break
curr = new
direction = new_direction
print(get_password(curr, direction))
| [
"yu.alex96@gmail.com"
] | yu.alex96@gmail.com |
ae9cf46ec0943498f6e3e7c67cc2ce07c8324700 | e12a76e98d38eaa37eecca7776afac13b56dea70 | /my_site/views.py | 705b2f7b16ae97087d6a755e012406bfdbd91ce7 | [] | no_license | ZeHakim/Django_repo_cours | a579be88fe1945efcc7954549967cc5414c502ba | 6aa4381dcba906f413fb61fb90c8fbf4dde1f4e8 | refs/heads/master | 2020-07-22T16:05:29.539483 | 2019-09-09T22:37:37 | 2019-09-09T22:37:37 | 207,255,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from django.http import HttpResponse
from django.shortcuts import render
def home_page_view(request):
return HttpResponse('Hello world !')
def home_page_view_with_render(request):
return render(request, "index.html")
def sign_up_page(request):
if request.method == "POST":
print(request.POST)
alpha = request.POST["data"]
print(alpha)
return render(request, "inscription.html",{"value1":"Valeur envoyรฉ depuis views.py"}) | [
"noreply@github.com"
] | noreply@github.com |
7a9135f75da2c2ffcd0c78979c1b7baf4d1c6c2a | bbcca03c8e248cd78c7b06dbafe3b3197404ff2e | /blogrobot/__init__.py | cdbdef445a90900ca114846c965bdef7154dafea | [] | no_license | pigga/blogrobot | 4fed63882639707ed604d8cae14b0ac13b8492e4 | f330aef2f6bfda67d30b3b65075b2ae492e9a3bb | refs/heads/master | 2020-04-13T05:09:11.673005 | 2019-02-21T07:43:58 | 2019-02-21T07:43:58 | 162,982,998 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
# ====#====#====#====
# __author__ = "Yannis"
# FileName: *.py
# Version:1.0.0
# ====#====#====#==== | [
"chenyanqing@tuscloud.io"
] | chenyanqing@tuscloud.io |
23936645c5429dbbbaad5e2fbb69f5df836ab631 | dd4d1a61ec680a86d4b569490bf2a898ea0d7557 | /appengine/predator/common/model/chrome_crash_analysis.py | 4473ea72c94ffcc660fa5ff6418f3923faf801aa | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-infra | f1a68914b47bcbe3cd8a424f43741dd74fedddf4 | 09064105713603f7bf75c772e8354800a1bfa256 | refs/heads/master | 2022-10-29T23:21:46.894543 | 2017-05-16T06:22:50 | 2017-05-16T06:22:50 | 91,423,078 | 1 | 1 | BSD-3-Clause | 2022-10-01T18:48:03 | 2017-05-16T06:23:34 | Python | UTF-8 | Python | false | false | 1,079 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.model.crash_analysis import CrashAnalysis
class ChromeCrashAnalysis(CrashAnalysis): # pylint: disable=W0223
"""Represents an analysis of a Chrome Crash (Cracas or Fracas)."""
# Customized properties for Fracas crash.
historical_metadata = ndb.JsonProperty(indexed=False)
channel = ndb.StringProperty(indexed=False)
def Reset(self):
super(ChromeCrashAnalysis, self).Reset()
self.historical_metadata = None
self.channel = None
def Initialize(self, crash_data):
"""(Re)Initializes a CrashAnalysis ndb.Model from ``ChromeCrashData``."""
super(ChromeCrashAnalysis, self).Initialize(crash_data)
self.channel = crash_data.channel
self.historical_metadata = crash_data.historical_metadata
@property
def customized_data(self):
return {'historical_metadata': self.historical_metadata,
'channel': self.channel}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
38c15c39a97e7ab3d51118f6386f186dda7696d8 | a0f1bfea522d5917ae6f18d3a4ab980870feac77 | /modules/hs/analysis/instruction.py | 9c6896ba3c8d225b4552a2b47164300ff9cdddce | [
"MIT"
] | permissive | sinsai/Sahana_eden | 1d9768d19266010caf2753b66d17925fe708007a | 798688dcf206fc81d586d9af1c57a99e6f1573c5 | refs/heads/master | 2020-06-07T21:10:17.416723 | 2011-06-10T08:57:23 | 2011-06-10T08:57:23 | 1,659,383 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | """
Healthscapes Geolytics Module
@author: Nico Preston <nicopresto@gmail.com>
@author: Colin Burreson <kasapo@gmail.com>
@author: Zack Krejci <zack.krejci@gmail.com>
@copyright: (c) 2010 Healthscapes
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import enum
from utils import keygen
class Instruction:
def __init__ (self, mode, procedure, dst, *args):
self.mode = mode
self.procedure = procedure
self.dst = dst
self.args = args
| [
"fran@aidiq.com"
] | fran@aidiq.com |
e6d02d6d4900746c204ca3813f00d3443fdfdc65 | abfe82e06aeeaec902b38d12788eb43e1481c873 | /encode/migrations/0004_auto_20210115_0615.py | c388cabdc2664a9b194c89538e45742e72653d0a | [] | no_license | arushisingla/recognific_web | 14195d4ad912e972761446115e24f4ff704a2c6e | 79c6d7b34d34c8f22ba02d6a6288e827322ffe48 | refs/heads/master | 2023-03-18T17:01:52.681635 | 2021-02-19T08:35:52 | 2021-02-19T08:35:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Generated by Django 3.1.4 on 2021-01-15 06:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('encode', '0003_auto_20210115_0523'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='classname',
),
migrations.AddField(
model_name='classset',
name='students',
field=models.ManyToManyField(related_name='classname', to='encode.Student'),
),
]
| [
"muskanvaswan@gmail.com"
] | muskanvaswan@gmail.com |
d926c78d9ca4a0ffd80d8aefc3bac5797f7db7a1 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_BestCycle_AR.py | b8625fb2fc2806fe6e615c6b2e4052c583dac9c1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 155 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['AR'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
15968c155a2959bf322fa384c8dc1a5eec3f0010 | ed7e10f95b34185bf8f71c931cd99ff90f5d4093 | /funktown/vector.py | 5351854c8a3f7f79e3fb9288a81555dd1379b7df | [
"MIT"
] | permissive | seanjensengrey/funktown | 3bc6e0713a048e716bf597575f87f2a9bba4ebc2 | 1149254d214717165a8692e2f8437949a8a4939a | refs/heads/master | 2021-01-17T23:12:01.090854 | 2011-12-24T19:16:58 | 2011-12-24T19:16:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from .lookuptree import LookupTree
from itertools import islice
class ImmutableVector:
def __init__(self, initvalues=None):
if not initvalues: initvalues = []
self.tree = LookupTree(initvalues)
self._length = len(initvalues)
def assoc(self, index, value):
newvec = ImmutableVector()
newvec.tree = self.tree.assoc(index, value)
if index >= self._length:
newvec._length = index+1
else:
newvec._length = self._length
return newvec
def concat(self, tailvec):
newvec = ImmutableVector()
vallist = [(i + self._length, tailvec[i]) \
for i in range(0, tailvec._length)]
newvec.tree = self.tree.multi_assoc(vallist)
newvec._length = self._length + tailvec._length
return newvec
def pop(self):
if self._length == 0:
raise IndexError()
newvec = ImmutableVector()
newvec.tree = self.tree.remove(self._length-1)
newvec._length = self._length-1
return newvec
def conj(self, value):
return self.assoc(self._length, value)
def get(self, index):
if index >= self._length:
raise IndexError
return self.tree[index]
def slice(self, slc):
lst = [val for val in islice(self, slc.start, slc.stop, slc.step)]
return ImmutableVector(lst)
def __add__(self, other):
return self.concat(other)
def __iter__(self):
for i in range(0, self._length):
yield self[i]
def __len__(self):
return self._length
def __getitem__(self, index):
if isinstance(index, slice):
return self.slice(index)
return self.get(index)
| [
"zhehao.mao@gmail.com"
] | zhehao.mao@gmail.com |
1e607f9d441198acc3026f56bf81f596458e8c7b | c01fcf93c9e24d739f67fbd976b56e01e7518df7 | /Bully Algorithm/member.py | 2972bb7a57f9515e7c2f99b5c3558141a38bd5c2 | [] | no_license | asupekar/Distributed-Systems-1 | 6104e2e6774b501ce033dc162695f5254f09403b | 34d88cd79b7e6e7c3039b5ef5fb4d6c8c43e428b | refs/heads/master | 2022-04-17T23:30:54.409414 | 2020-04-20T18:50:40 | 2020-04-20T18:50:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | """
CPSC 5520, Seattle University
This is free and unencumbered software released into the public domain.
:Authors: Kevin Lundeen
:Version: f19-02
"""
import pickle
import socketserver
import sys
BUF_SZ = 1024 # tcp receive buffer size
class GroupMember(socketserver.BaseRequestHandler):
"""
A Group Member that acts as a server and responds to peers' messages
For Lab1, we respond only to HELLO messages.
"""
def handle(self):
"""
Handles the incoming messages - expects only 'HELLO' messages
"""
raw = self.request.recv(BUF_SZ) # self.request is the TCP socket connected to the client
try:
message = pickle.loads(raw)
except (pickle.PickleError, KeyError):
response = bytes('Expected a pickled message, got ' + str(raw)[:100] + '\n', 'utf-8')
else:
if message != 'HELLO':
response = pickle.dumps('Unexpected message: ' + str(message))
else:
message = ('OK', 'Happy to meet you, {}'.format(self.client_address))
response = pickle.dumps(message)
self.request.sendall(response)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python member.py PORT")
exit(1)
port = int(sys.argv[1])
with socketserver.TCPServer(('', port), GroupMember) as server:
server.serve_forever()
| [
"mhatrenishig@seattleu.edu"
] | mhatrenishig@seattleu.edu |
19cb3366f04a6d183e67d0215b58bc5700aa5fe6 | 167c6820c0c3fc17d27fb231d4fde03b560e7508 | /users/migrations/0005_auto_20200411_1028.py | f67f53d3692d02446f243f01e71ea8551cbad9f9 | [] | no_license | Camilo-Quiceno/Personal_Blog | 6c9756b8f28cbfdae641ba1bb7180699903b8763 | d8dc7b56eb172dcd98d4c87c03595e607f09f54c | refs/heads/master | 2021-06-14T05:50:16.761481 | 2020-04-16T21:01:31 | 2020-04-16T21:01:31 | 254,473,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Generated by Django 3.0.5 on 2020-04-11 15:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20200411_0844'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Category', unique=True),
),
]
| [
"camilo.quiceno.q@gmail.com"
] | camilo.quiceno.q@gmail.com |
f4710e3c72b1ac6c26e7439cfabb274dd1bde506 | 3f342f06bedddd77c5fc9ad683d28f4b6fcf7079 | /Apps/Talleres/views.py | a8a4f8e1369a5596bd10f1c88abece99b9912029 | [] | no_license | FelipeASA/WebTalleres-Django | dfc293b4b23319777f1fdfde8496eb1d9182a206 | 9b6f2acc1dc89ad424f576d7622978923c55cc0a | refs/heads/master | 2020-05-29T08:35:33.951756 | 2019-05-28T14:16:09 | 2019-05-28T14:16:09 | 189,042,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.core import serializers
from Apps.Talleres.models import TalleresModelo
from Apps.Talleres.forms import TallerCrearForm
# Create your views here.
def talleres_listar(request):
datos = TalleresModelo.objects.all()
contexto = {'lista': datos}
return render(request, 'talleres/talleres_listar.html', contexto)
def talleres_crear(request):
if request.method == "POST":
form = TallerCrearForm(request.POST)
if form.is_valid():
form.save()
return redirect('talleres_listar')
else:
form=TallerCrearForm()
contexto = {'form': form}
return render(request, 'talleres/talleres_crear.html', contexto)
def talleres_eliminar(request, id):
# obtenemos el modelo taller
taller = TalleresModelo.objects.get(id = id)
if request.method == "POST":
taller.delete()
return redirect('talleres_listar')
else:
return render(request, 'talleres/talleres_eliminar.html', {'taller': taller})
def talleres_editar(request, id):
# otenemos el modelo taller
taller = TalleresModelo.objects.get(id = id)
if request.method == "GET":
form=TallerCrearForm(instance=taller)
else:
form=TallerCrearForm(request.POST, instance=taller)
if form.is_valid():
form.save()
return redirect('talleres_listar')
contexto = {'form': form}
return render(request, 'talleres/talleres_editar.html', contexto)
def talleres_json(request):
datos = TalleresModelo.objects.all()
qs_json = serializers.serialize('json', datos)
return HttpResponse(qs_json, content_type='application/json')
| [
"f.saavedra16@gmail.com"
] | f.saavedra16@gmail.com |
50ac2e045886d2069bb686e25b1fb783ace85abf | 97f38bc0dff9498c43d13f15f4b26000874a840f | /pysp/plugins/ecksteincombettesextension.py | 36809dee8e435aa6869ed8d825e77d580a587f94 | [
"BSD-3-Clause"
] | permissive | tayucanjujieyihan/pysp | 2975330f3a7f1c2aa56d9a69be2bdd08a632d3e9 | 98dbc9f6d500b0b2485a89bb22813e6c51b64411 | refs/heads/main | 2023-05-06T17:33:07.306607 | 2021-05-26T22:44:28 | 2021-05-26T22:44:28 | 442,712,534 | 1 | 0 | NOASSERTION | 2021-12-29T08:43:26 | 2021-12-29T08:43:26 | null | UTF-8 | Python | false | false | 25,631 | py | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import pyomo.common.plugin
from six import iteritems, print_
import random
from pysp import phextension
from pysp.convergence import ConvergenceBase
from pyomo.core.base import minimize
import math
# the converger for the class - everything (primal and dual) is
# contained in the (u,v) vector of the Eckstein-Combettes extension.
class EcksteinCombettesConverger(ConvergenceBase):
    """Convergence monitor based on the scalarized (u,v) separator norm.

    The Eckstein-Combettes extension computes the norm itself during each
    projection step and stashes it here; this converger performs no
    computation of its own - it merely reports the most recent value
    back to PH.
    """

    def __init__(self, *args, **kwds):

        super(EcksteinCombettesConverger, self).__init__(*args, **kwds)

        self._name = "Eckstein-Combettes (u,v) norm"

        # storage slot for the norm value pushed in by the plugin; None
        # until the first projection step has been performed.
        self._last_computed_uv_norm_value = None

    def computeMetric(self, ph, scenario_tree, instances):
        """Return the (u,v) norm most recently recorded by the plugin."""

        return self._last_computed_uv_norm_value
# the primary Eckstein-Combettes extension class
class EcksteinCombettesExtension(pyomo.common.plugin.SingletonPlugin):
pyomo.common.plugin.implements(phextension.IPHExtension)
pyomo.common.plugin.alias("ecksteincombettesextension")
def __init__(self):
import random
random.seed(1234)
print("Kludge warning: set random seed to 1234")
self._check_output = False
self._JName = "PhiSummary.csv"
self._subproblems_to_queue = []
# various configuration options.
# if this is True, then the number of sub-problems
# returned may be less than the buffer length.
self._queue_only_negative_subphi_subproblems = False
# track the total number of projection steps performed (and, implicitly,
# the current projection step) in addition to the last projection step
# at which a scenario sub-problem was incorporated.
self._total_projection_steps = 0
self._projection_step_of_last_update = {} # maps scenarios to projection step number
self._converger = None
def check_optimality_conditions(self, ph):
print("Checking optimality conditions for Eckstein-Combettes plugin")
for stage in ph._scenario_tree._stages[:-1]:
for tree_node in stage._tree_nodes:
for variable_id in tree_node._standard_variable_ids:
expected_y = 0.0
for scenario in tree_node._scenarios:
expected_y += ((scenario._y[variable_id] * scenario._probability) / tree_node._probability)
# the expected value of the y vector should be 0 if the solution is optimal
    def compute_updates(self, ph, subproblems, scenario_solve_counts):
        """Execute one Eckstein-Combettes projection step.

        ``subproblems`` is the list of scenario sub-problem names whose
        solutions were just returned by the asynchronous PH solve loop;
        ``scenario_solve_counts`` is currently unused by this method.

        The step: (1) computes the scenario-based y and u vectors from
        the newly returned solutions; (2) computes the node-based v
        vector (the conditional expectation of y); (3) computes the
        separator value phi and, when phi > 0, performs the projection
        update of the node z values and scenario weights w; (4) copies z
        into the PH xbar maps (this plugin owns xbar updates); (5)
        computes the normalized, scalarized (u,v) norm and stashes it on
        the converger; and (6) selects the sub-problems to queue next,
        based on the post-projection per-scenario phi values.

        Side effects: mutates tree node _z/_v/_xbars and scenario
        _y/_u/_w in place, appends rows to the phi summary CSV file, and
        extends ``self._subproblems_to_queue``.
        """

        scale_factor = 1.0 # This should be a command-line parameter

        self._total_projection_steps += 1
        print("Initiating projection step: %d" % self._total_projection_steps)

        print("Computing updates given solutions to the following sub-problems:")
        for subproblem in subproblems:
            print("%s" % subproblem)
        print("")

        # record the projection step at which each returned sub-problem
        # was incorporated (reported later for diagnostics).
        for subproblem in subproblems:
            self._projection_step_of_last_update[subproblem] = self._total_projection_steps

        ########################################
        ##### compute y values and u values ####
        #####   these are scenario-based    ####
        ########################################

        # NOTE: z is initialized to be xbar elsewhere, but it is *not* xbar.
        # NOTE: v is essentially y-bar.
        # NOTE: lambda is 1/rho - so if you see 1/lambda in a reference
        #       document, use rho in this code.
        # ASSUME W is the Eckstein W, not the PH W.

        for stage in ph._scenario_tree._stages[:-1]:

            for tree_node in stage._tree_nodes:

                if ph._dual_mode is True:
                    raise RuntimeError("***dual_mode not supported by compute_y in plugin ")
                tree_node_averages = tree_node._averages
                tree_node_zs = tree_node._z

                for scenario in tree_node._scenarios:

                    weight_values = scenario._w[tree_node._name]
                    rho_values = scenario._rho[tree_node._name]
                    var_values = scenario._x[tree_node._name]

                    for variable_id in tree_node._standard_variable_ids:
                        varval = var_values[variable_id]
                        if varval is not None:
                            if scenario._objective_sense == minimize:
                                # y is only recomputed for scenarios whose
                                # sub-problems were just solved; u is
                                # refreshed for every scenario.
                                if scenario._name in subproblems:
                                    # CRITICAL: Y depends on the z and weight values that were used when solving the scenario!
                                    z_for_solve = scenario._xbars_for_solve[tree_node._name][variable_id]
                                    w_for_solve = scenario._ws_for_solve[tree_node._name][variable_id]
                                    scenario._y[variable_id] = rho_values[variable_id] * (z_for_solve - varval) - w_for_solve
                                    # check it!
                                    #print("THIS %s SHOULD EQUAL THIS %s" % (varval + (1.0/rho_values[variable_id])*scenario._y[variable_id],z_for_solve-(1.0/rho_values[variable_id])*w_for_solve))
                                scenario._u[variable_id] = varval - tree_node_averages[variable_id]
                            else:
                                raise RuntimeError("***maximize not supported by compute_y in plugin ")

        if self._check_output:
            print("Y VALUES:")
            for scenario in ph._scenario_tree._scenarios:
                print(scenario._y)
            print("U VALUES:")
            for scenario in ph._scenario_tree._scenarios:
                print(scenario._u)

        # self.check_optimality_conditions(ph)

        ###########################################
        # compute v values - these are node-based #
        ###########################################

        for stage in ph._scenario_tree._stages[:-1]:
            for tree_node in stage._tree_nodes:
                for variable_id in tree_node._standard_variable_ids:
                    expected_y = 0.0
                    for scenario in tree_node._scenarios:
                        expected_y += ((scenario._y[variable_id] * scenario._probability) / tree_node._probability)
                    tree_node._v[variable_id] = expected_y

        if self._check_output:
            print("V VALUES:")
            for stage in ph._scenario_tree._stages[:-1]:
                for tree_node in stage._tree_nodes:
                    print(tree_node._v)

        ###########################################
        # compute norms and test for convergence  #
        ###########################################

        # probability-weighted squared norms of u (per-scenario) and v
        # (per-node), accumulated jointly.
        # NOTE(review): the v term is accumulated once per scenario at each
        #               node (inner scenario loop), so each node's v
        #               contribution is added len(scenarios) times -
        #               confirm this weighting is intended.
        p_unorm = 0.0
        p_vnorm = 0.0
        for stage in ph._scenario_tree._stages[:-1]:
            for tree_node in stage._tree_nodes:
                for variable_id in tree_node._standard_variable_ids:
                    for scenario in tree_node._scenarios:
                        this_v_val = tree_node._v[variable_id]
                        p_vnorm += tree_node._probability * this_v_val * this_v_val
                        this_u_val = scenario._u[variable_id]
                        p_unorm += scenario._probability * this_u_val * this_u_val

        if self._check_output :
            print("unorm^2 = " + str(p_unorm) + " vnorm^2 = " + str(p_vnorm))

        p_unorm = math.sqrt(p_unorm)
        p_vnorm = math.sqrt(p_vnorm)

        #####################################################
        # compute phi; if greater than zero, update z and w #
        #####################################################

        print("")
        print("Initiating projection calculations...")

        with open(self._JName,"a") as f:
            f.write("%10d" % (ph._current_iteration))

        phi = 0.0

        sub_phi_map = {}

        # per-scenario contributions to phi; stale variables are skipped.
        for scenario in ph._scenario_tree._scenarios:
            cumulative_sub_phi = 0.0
            for tree_node in scenario._node_list[:-1]:
                tree_node_zs = tree_node._z
                for variable_id in tree_node._standard_variable_ids:
                    var_values = scenario._x[tree_node._name]
                    varval = var_values[variable_id]
                    weight_values = scenario._w[tree_node._name]
                    if not scenario.is_variable_stale(tree_node, variable_id):
                        this_sub_phi_term = scenario._probability * ((tree_node_zs[variable_id] - varval) * (scenario._y[variable_id] + weight_values[variable_id]))
                        cumulative_sub_phi += this_sub_phi_term

            with open(self._JName,"a") as f:
                f.write(", %10f" % (cumulative_sub_phi))

            sub_phi_map[scenario._name] = cumulative_sub_phi
            phi += cumulative_sub_phi

        with open(self._JName,"a") as f:
            for subproblem in subproblems:
                f.write(", %s" % subproblem)
            f.write("\n")

        print("Computed sub-phi values, by scenario:")
        for scenario_name in sorted(sub_phi_map.keys()):
            print("   %30s %16e" % (scenario_name, sub_phi_map[scenario_name]))
        print("")
        print("Computed phi: %16e" % phi)

        if phi > 0:
            tau = 1.0 # this is the over-relaxation parameter - we need to do something more useful
            denominator = p_unorm*p_unorm + scale_factor*p_vnorm*p_vnorm
            if self._check_output :
                print("denominator = " + str(denominator))
            # projection step length.
            theta = phi/denominator
            print("Computed theta: %16e" % theta)

            # projection update: z moves along -v (scaled), w moves along u.
            for stage in ph._scenario_tree._stages[:-1]:
                for tree_node in stage._tree_nodes:
                    if self._check_output:
                        print("TREE NODE ZS BEFORE: %s" % tree_node._z)
                        print("TREE NODE VS BEFORE: %s" % tree_node._v)
                    tree_node_zs = tree_node._z
                    for variable_id in tree_node._standard_variable_ids:
                        for scenario in tree_node._scenarios:
                            rho_values = scenario._rho[tree_node._name]
                            weight_values = scenario._w[tree_node._name]
                            if self._check_output:
                                print("WEIGHT VALUE PRIOR TO MODIFICATION=",weight_values[variable_id])
                                print("U VALUE PRIOR TO MODIFICATION=",scenario._u[variable_id])
                            # print("SUBTRACTING TERM TO Z=%s" % (tau * theta * tree_node._v[variable_id]))
                            tree_node._z[variable_id] -= (tau * theta * scale_factor * tree_node._v[variable_id])
                            weight_values[variable_id] += (tau * theta * scenario._u[variable_id])
                            if self._check_output:
                                print("NEW WEIGHT FOR VARIABLE=",variable_id,"FOR SCENARIO=",scenario._name,"EQUALS",weight_values[variable_id])
                    # print("TREE NODE ZS AFTER: %s" % tree_node._z)

        elif phi == 0.0:
            print("***PHI WAS ZERO - NOT DOING ANYTHING - NO MOVES - DOING CHECK BELOW!")
            pass
        else:
            # WE MAY NOT BE SCREWED, BUT WE'LL ASSUME SO FOR NOW.
            print("***PHI IS NEGATIVE - NOT DOING ANYTHING")

        if self._check_output:
            print("Z VALUES:")
            for stage in ph._scenario_tree._stages[:-1]:
                for tree_node in stage._tree_nodes:
                    print("TREE NODE=%s",tree_node._name)
                    print("Zs:",tree_node._z)

        # CHECK HERE - PHI SHOULD BE 0 AT THIS POINT - THIS IS JUST A CHECK
        with open(self._JName,"a") as f:
            f.write("%10d" % (ph._current_iteration))

        # the z's have been updated - copy these to PH scenario tree xbar maps,
        # so they can be correctly transmitted to instances - this plugin is
        # responsible for xbar updates.
        for stage in ph._scenario_tree._stages[:-1]:
            for tree_node in stage._tree_nodes:
                for variable_id in tree_node._z:
                    tree_node._xbars[variable_id] = tree_node._z[variable_id]

        #########################################################################################
        # compute the normalizers for unorm and vnorm, now that we have updated w and z values. #
        #########################################################################################

        # probability-weighted ||z||, used to normalize the u norm.
        unorm_normalizer = 0.0
        for stage in ph._scenario_tree._stages[:-1]:
            for tree_node in stage._tree_nodes:
                this_node_unorm_normalizer = 0.0
                for variable_id in tree_node._standard_variable_ids:
                    this_z_value = tree_node._z[variable_id]
                    this_node_unorm_normalizer += this_z_value**2
                unorm_normalizer += tree_node._probability * this_node_unorm_normalizer

        # probability-weighted ||w||, used to normalize the v norm.
        vnorm_normalizer = 0.0
        for stage in ph._scenario_tree._stages[:-1]:
            for tree_node in stage._tree_nodes:
                for scenario in tree_node._scenarios:
                    this_scenario_vnorm_normalizer = 0.0
                    this_scenario_ws = scenario._w[tree_node._name]
                    for variable_id in tree_node._standard_variable_ids:
                        this_scenario_vnorm_normalizer += this_scenario_ws[variable_id]**2
                    vnorm_normalizer += scenario._probability * this_scenario_vnorm_normalizer

        unorm_normalizer = math.sqrt(unorm_normalizer)
        vnorm_normalizer = math.sqrt(vnorm_normalizer)

        # print("p_unorm=",p_unorm)
        # print("p_unorm_normalizer=",unorm_normalizer)
        # print("p_vnorm=",p_vnorm)
        # print("p_vnorm_normalizer=",vnorm_normalizer)

        # NOTE(review): there is no guard against a zero normalizer - if all
        #               z (or all w) values are zero, this raises
        #               ZeroDivisionError.
        p_unorm /= unorm_normalizer
        p_vnorm /= vnorm_normalizer

        scalarized_norm = math.sqrt(p_unorm*p_unorm + p_vnorm*p_vnorm)

        print("Computed separator norm: (%e,%e) - scalarized norm=%e" % (p_unorm, p_vnorm, scalarized_norm))

        # hand the norm to the converger - it performs no computation itself.
        self._converger._last_computed_uv_norm_value = scalarized_norm

        # if p_unorm < delta and p_vnorm < epsilon:
        #     print("Separator norm dropped below threshold (%e,%e)" % (delta, epsilon))
        #     return

        print("")
        print("Initiating post-projection calculations...")

        # recompute phi against the *updated* z and w values; these
        # post-projection per-scenario values drive sub-problem selection.
        phi = 0.0

        sub_phi_to_scenario_map = {}

        for scenario in ph._scenario_tree._scenarios:
            cumulative_sub_phi = 0.0
            for tree_node in scenario._node_list[:-1]:
                tree_node_zs = tree_node._z
                for variable_id in tree_node._standard_variable_ids:
                    var_values = scenario._x[tree_node._name]
                    varval = var_values[variable_id]
                    weight_values = scenario._w[tree_node._name]
                    if not scenario.is_variable_stale(tree_node, variable_id):
                        this_sub_phi_term = scenario._probability * ((tree_node_zs[variable_id] - varval) * (scenario._y[variable_id] + weight_values[variable_id]))
                        cumulative_sub_phi += this_sub_phi_term

            with open(self._JName,"a") as f:
                f.write(", %10f" % (cumulative_sub_phi))

            if not cumulative_sub_phi in sub_phi_to_scenario_map:
                sub_phi_to_scenario_map[cumulative_sub_phi] = []
            sub_phi_to_scenario_map[cumulative_sub_phi].append(scenario._name)

            phi += cumulative_sub_phi

        print("Computed sub-phi values (scenario, phi, iters-since-last-incorporated):")
        for sub_phi in sorted(sub_phi_to_scenario_map.keys()):
            print_("   %16e: " % sub_phi, end="")
            for scenario_name in sub_phi_to_scenario_map[sub_phi]:
                print("%30s %4d" % (scenario_name,
                                    self._total_projection_steps - self._projection_step_of_last_update[scenario_name]))
        print("")
        print("Computed phi: %16e" % phi)

        with open(self._JName,"a") as f:
            f.write("\n")

        negative_sub_phis = [sub_phi for sub_phi in sub_phi_to_scenario_map if sub_phi < 0.0]

        if len(negative_sub_phis) == 0:
            print("**** YIKES! QUEUING SUBPROBLEMS AT RANDOM****")
            # TBD - THIS ASSUMES UNIQUE PHIS, WHICH IS NOT ALWAYS THE CASE.
            # NOTE(review): under Python 3, dict.keys() returns a view, so
            #               random.shuffle() raises TypeError here and the
            #               slice below fails - wrap in list() when porting.
            #               Also note the loop variable "phi" rebinds the
            #               aggregate phi computed above (unused afterwards).
            all_phis = sub_phi_to_scenario_map.keys()
            random.shuffle(all_phis)
            for phi in all_phis[0:ph._async_buffer_length]:
                scenario_name = sub_phi_to_scenario_map[phi][0]
                if ph._scenario_tree.contains_bundles():
                    # bundle support is unimplemented - the bare "foobar"
                    # name below is a deliberate NameError tripwire (and
                    # self._scenario_tree is not an attribute of this
                    # plugin, so the print above it would fail too).
                    print("****HERE****")
                    print("SCENARIO=",scenario_name)
                    print("SCENARIO BUNDLE=",self._scenario_tree.get_scenario_bundle(scenario_name))
                    foobar
                else:
                    print("Queueing sub-problem=%s" % scenario_name)
                    self._subproblems_to_queue.append(scenario_name)
        else:
            if self._queue_only_negative_subphi_subproblems:
                print("Queueing sub-problems whose scenarios possess the most negative phi values:")
            else:
                print("Queueing sub-problems whose scenarios possess the smallest phi values:")
            sorted_phis = sorted(sub_phi_to_scenario_map.keys())
            for phi in sorted_phis[0:ph._async_buffer_length]:
                if ((self._queue_only_negative_subphi_subproblems) and (phi < 0.0)) or (not self._queue_only_negative_subphi_subproblems):
                    scenario_name = sub_phi_to_scenario_map[phi][0]
                    print_("%30s %16e" % (scenario_name,phi), end="")
                    self._subproblems_to_queue.append(scenario_name)
            print("")
    def reset(self, ph):
        """Reset this extension to its freshly-constructed state by re-running __init__."""
        self.__init__()
    def pre_ph_initialization(self, ph):
        """Called before PH initialization"""
        # No-op: this extension needs no setup before PH initializes.
        pass
    def post_instance_creation(self, ph):
        """Called after the instances have been created"""
        # Start (truncate) the phi-summary file self._JName and emit its header
        # row: one column per scenario plus a trailing "Subproblems Returned"
        # column; data rows are appended elsewhere with mode "a".
        with open(self._JName,"w") as f:
            f.write("Phi Summary; generally two lines per iteration\n")
            f.write("Iteration ")
            for scenario in ph._scenario_tree._scenarios:
                f.write(", %10s" % (scenario._name))
            f.write(", Subproblems Returned")
            f.write("\n")
    def post_ph_initialization(self, ph):
        """Called after PH initialization"""
        # IMPORTANT: if the Eckstein-Combettes extension plugin is enabled,
        # then make sure PH is in async mode - otherwise, nothing
        # will work!
        if not ph._async_mode:
            raise RuntimeError("PH is not in async mode - this is required for the Eckstein-Combettes extension")
        # Reset the projection-step counters used by the phi bookkeeping.
        self._total_projection_steps = 0
        for scenario in ph._scenario_tree._scenarios:
            self._projection_step_of_last_update[scenario._name] = 0
        # NOTE: we don't yet have a good way to get keyword options into
        # plugins - so this is mildly hack-ish. more hackish, but
        # useful, would be to extract the value from an environment
        # variable - similar to what is done in the bounds extension.
        # the convergence threshold should obviously be parameterized
        self._converger = EcksteinCombettesConverger(convergence_threshold=1e-5)
        ph._convergers.append(self._converger)
##########################################################
# the following callbacks are specific to synchronous PH #
##########################################################
    def post_iteration_0_solves(self, ph):
        """Called after the iteration 0 solves"""
        # This extension takes over both weight (w) and xbar (z) updates
        # from PH, so disable PH's built-in update machinery here.
        # we want the PH estimates of the weights initially, but we'll compute them afterwards.
        ph._ph_weight_updates_enabled = False
        # we will also handle xbar updates (z).
        ph._ph_xbar_updates_enabled = False
def post_iteration_0(self, ph):
"""Called after the iteration 0 solves, averages computation, and weight computation"""
print("POST ITERATION 0 CALLBACK")
# define y and u parameters for each non-leaf variable in each scenario.
print("****ADDING Y, U, V, and Z PARAMETERS")
for scenario in ph._scenario_tree._scenarios:
scenario._y = {}
scenario._u = {}
# instance = scenario._instance
for tree_node in scenario._node_list[:-1]:
nodal_index_set = tree_node._standard_variable_ids
assert nodal_index_set is not None
scenario._y.update((variable_id, 0.0) for variable_id in nodal_index_set)
scenario._u.update((variable_id, 0.0) for variable_id in nodal_index_set)
# print "YS AFTER UPDATE:",scenario._y
# define v and z parameters for each non-leaf variable in the tree.
for stage in ph._scenario_tree._stages[:-1]:
for tree_node in stage._tree_nodes:
nodal_index_set = tree_node._standard_variable_ids
assert nodal_index_set is not None
tree_node._v = dict((i,0) for i in nodal_index_set)
tree_node._z = dict((i,tree_node._averages[i]) for i in nodal_index_set)
# copy z to xbar in the scenario tree, as we've told PH we will be taking care of it.
for stage in ph._scenario_tree._stages[:-1]:
for tree_node in stage._tree_nodes:
nodal_index_set = tree_node._standard_variable_ids
assert nodal_index_set is not None
tree_node._xbars = dict((i,tree_node._z[i]) for i in nodal_index_set)
# mainly to set up data structures.
for subproblem in ph._scenario_tree.subproblems:
self.asynchronous_pre_scenario_queue(ph, subproblem.name)
# pick subproblems at random - we need a number equal to the async buffer length,
# although we need all of them initially (PH does - not this particular plugin).
async_buffer_length = ph._async_buffer_length
all_subproblems = [subproblem.name for subproblem in ph._scenario_tree.subproblems]
random.shuffle(all_subproblems)
self._subproblems_to_queue = all_subproblems[0:ph._async_buffer_length]
    def pre_iteration_k_solves(self, ph):
        """Called before each iteration k solve"""
        # No-op: synchronous-PH hook, unused by this asynchronous extension.
        pass
    def post_iteration_k_solves(self, ph):
        """Called after the iteration k solves"""
        # No-op: synchronous-PH hook, unused by this asynchronous extension.
        pass
    def post_iteration_k(self, ph):
        """Called after the iteration k is finished"""
        # No-op: synchronous-PH hook, unused by this asynchronous extension.
        pass
##########################################################
###########################################################
# the following callbacks are specific to asynchronous PH #
###########################################################
    def pre_asynchronous_solves(self, ph):
        """Called before the asynchronous solve loop is executed"""
        # No-op: per-subproblem setup happens in asynchronous_pre_scenario_queue.
        pass
    def asynchronous_pre_scenario_queue(self, ph, subproblem_name):
        """Called right before each subproblem solve is queued"""
        # Resolve the subproblem into its constituent scenarios (a bundle may
        # contain several; otherwise the subproblem name is a scenario name).
        scenarios_to_process = []
        if ph._scenario_tree.contains_bundles():
            for scenario_name in ph._scenario_tree.get_bundle(subproblem_name).scenario_names:
                scenarios_to_process.append(ph._scenario_tree.get_scenario(scenario_name))
        else:
            scenarios_to_process.append(ph._scenario_tree.get_scenario(subproblem_name))
        # we need to cache the z and w that were used when solving the input scenario.
        # (shallow copies per tree node, keyed by node name)
        for scenario in scenarios_to_process:
            scenario._xbars_for_solve = {}
            for tree_node in scenario._node_list[:-1]:
                scenario._xbars_for_solve[tree_node._name] = dict((k,v) for k,v in iteritems(tree_node._z))
            scenario._ws_for_solve = {}
            for tree_node in scenario._node_list[:-1]:
                scenario._ws_for_solve[tree_node._name] = dict((k,v) for k,v in iteritems(scenario._w[tree_node._name]))
    def post_asynchronous_var_w_update(self, ph, subproblems, scenario_solve_counts):
        """Called after a batch of asynchronous sub-problems are solved and corresponding statistics are updated"""
        print("")
        print("Computing updates in Eckstein-Combettes extension")
        # Delegate to compute_updates -- presumably defined earlier on this
        # class (not visible in this chunk).
        self.compute_updates(ph, subproblems, scenario_solve_counts)
    def post_asynchronous_solves(self, ph):
        """Called after the asynchronous solve loop is executed"""
        # No-op: nothing to clean up after the solve loop.
        pass
def asynchronous_subproblems_to_queue(self, ph):
"""Called after subproblems within buffer length window have been processed"""
result = self._subproblems_to_queue
self._subproblems_to_queue = []
return result
###########################################################
    def post_ph_execution(self, ph):
        """Called after PH has terminated"""
        # No-op: no teardown or final reporting is performed here.
        pass
| [
"jsiirola@users.noreply.github.com"
] | jsiirola@users.noreply.github.com |
2f6a3a8bbb54b868acd53f5024a87f6195389eaf | 1d7d8631eba89a4024b8a1beab3b82ede7e9afb8 | /venv/bin/easy_install | 02e067915e4683226e6e7f15ee4b5f41d4b01c5e | [] | no_license | alexander-chernyshev/PythonCodeReview1 | 74390d00c79cf984ea569d1a7ec866e1d1f41cd3 | ca552c71cde46f58dac61e3b686e9aaecb315eb0 | refs/heads/master | 2020-05-19T09:07:07.269295 | 2019-05-05T22:34:17 | 2019-05-05T22:34:17 | 184,939,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | #!/home/alexander/Workspace/PythonCodeReview1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim: normalize argv[0] by
    # stripping any "-script.py(w)"/".exe" suffix, then exit with the return
    # value of the 'easy_install' entry point pinned to setuptools 40.8.0.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"alexander.a.chernyshev@gmail.com"
] | alexander.a.chernyshev@gmail.com | |
3804bef51dde3f7326084636ad7fb6315c6f8a5e | 8fd421e609b679a51c9debfd851e5e6caead7a02 | /src/todobackend/urls.py | 0e7a90a28804cbda0199311e01ba5f2e33c5be9c | [] | no_license | mangroa/docker-ansible | ca41328c2e783c7db8e3fe8d0dc7c5526a4e3412 | 11cb8958cf01dba091b18e644e7312ad621483c4 | refs/heads/master | 2020-04-07T18:43:45.613669 | 2018-11-22T01:20:46 | 2018-11-22T01:20:46 | 158,621,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """todobackend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from todo import urls
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Everything else is delegated to the todo app's URLconf.
    url(r'^', include('todo.urls')),
]
| [
"alan.mangroo@gmail.com"
] | alan.mangroo@gmail.com |
7f0f45503dc28887ddebb17492cd8a814db6b9ef | 6c2fbfc3e93641c50436632735c3f142fbc0d386 | /lda22ldamt.py | f030973a54089fad297014044f9c8f1e4d183748 | [] | no_license | bayegy/MetaGenome | eb4abb1a28a753ee139dbbee0ad8e7a38104b9cf | b028b0a76e9d548812d5606430ec5ff69ba802c1 | refs/heads/master | 2021-06-19T07:34:17.469472 | 2021-01-13T10:17:28 | 2021-01-13T10:17:28 | 159,475,278 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!/usr/bin/env python3.8
import sys
# Usage: lda22ldamt.py <input-lda-table> <output-table> <lda-threshold>
# Keep rows whose adjusted LDA score is empty or >= threshold unchanged;
# otherwise blank the significance-group and adjusted-LDA columns while
# preserving the feature, raw LDA score and p-value.
script_name, input_path, output_path, thresh = sys.argv
# fix: the original rebound the file-name variables (lda2/lda4) to the open
# file objects, shadowing them; clearer distinct names are used here.
with open(input_path) as lda2_in, open(output_path, 'w') as lda4_out:
    for line in lda2_in:
        # fix: rstrip('\n') instead of strip() so a trailing empty
        # tab-separated column is not silently discarded (strip() would
        # remove the trailing tab and break the 5-field unpack).
        feature, raw_lda, sig_group, adj_lda, p_value = line.rstrip('\n').split('\t')
        if adj_lda == "" or float(adj_lda) >= float(thresh):
            lda4_out.write(line)
        else:
            lda4_out.write('{}\t{}\t\t\t{}\n'.format(feature, raw_lda, p_value))
| [
"947366452@qq.com"
] | 947366452@qq.com |
983ab87b3a7b3a5a7429a711fb8752d2043ed42d | c9e3ecbb0055fc9871bca18b78f9605ce02177e7 | /contrib/errorprone/src/python/pants/contrib/errorprone/register.py | 1731d2f45f37014fb6125df7fdf86ba1ffddc605 | [
"Apache-2.0"
] | permissive | foursquare/pants | 2349434a8f1882adf8136e87772d3c13152898c2 | f0627cfa6ab05fc9a10686a499d1fb1d6ebdb68b | refs/heads/1.7.0+fsX | 2023-07-19T23:51:41.257372 | 2021-02-12T13:11:16 | 2021-02-15T08:28:37 | 24,210,090 | 1 | 1 | Apache-2.0 | 2023-07-11T08:41:59 | 2014-09-19T00:28:00 | Python | UTF-8 | Python | false | false | 514 | py | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.errorprone.tasks.errorprone import ErrorProne
def register_goals():
task(name='errorprone', action=ErrorProne).install('compile')
| [
"noreply@github.com"
] | noreply@github.com |
e650793603ccf2cefac008d1c76270721b8d1367 | 57061e611a549f9afe4f5201730a85d76a7e505f | /setup.py | 5323723ba2f8215b16c769148b156602f63760fc | [
"MIT"
] | permissive | briostack/chrome-printtopdf | 35ee5da836878107f7586a7e61f1adf6b7d8c4cb | 6b4f91ab50cbc3570c27cfd8511f3964387c356e | refs/heads/master | 2022-03-08T14:58:51.843698 | 2022-03-01T22:32:14 | 2022-03-01T22:32:14 | 94,803,813 | 1 | 0 | null | 2017-06-19T17:38:03 | 2017-06-19T17:38:03 | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python
from __future__ import print_function
import os
import codecs
from setuptools import setup, find_packages
def read(*parts):
    """Return the UTF-8 decoded contents of a file located relative to this script."""
    path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(path, encoding='utf-8') as handle:
        return handle.read()
# Package metadata consumed by setuptools / PyPI.
setup(
    name="chrome-printtopdf",
    version='0.0.2',
    url='https://github.com/stefanw/chrome-printtopdf',
    license='MIT',
    description="Get PDFs from URLs using chrome",
    long_description=read('README.md'),
    author='Stefan Wehrmeyer',
    author_email='mail@stefanwehrmeyer.com',
    packages=find_packages(),
    install_requires=['aiohttp'],
    classifiers=[
        'Development Status :: 4 - Beta',
        # NOTE(review): the Django classifier looks out of place for this
        # chrome-based utility -- confirm it is intentional.
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
    ]
)
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
c579f470c806a9dc5380e7a37861e02db96a8346 | a107fa9b6393bb5344e7bb9dfc5ffa2ac871347e | /client.py | fdd37575ccb4fc3952feb4788f77ca1222fe1c76 | [] | no_license | Stoney290/Clip-to-Clip | f3b129b653c57ea015e19fd56e990612966098a4 | 935abc43bd574956b8bf282521a2f9dac0f55b23 | refs/heads/master | 2021-02-05T22:29:55.935929 | 2020-05-31T18:00:50 | 2020-05-31T18:00:50 | 243,843,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import sys
import requests
from requests.auth import HTTPBasicAuth
Backend = 'http://localhost:10000/clipboard'
if len(sys.argv) == 5:
Program, User, Pwd, Content, Expiry = sys.argv
Expiry = int(Expiry)
payload = {
'User':User,
'Pwd':Pwd,
'Content':Content,
'Expiry':Expiry
}
elif len(sys.argv) == 3:
Program, User, Pwd = sys.argv
else:
print("This is not a valid option!")
if len(sys.argv) == 5:
Result = requests.post(url=Backend, data = payload)
else:
Result = requests.get(url=Backend, auth = HTTPBasicAuth(User,Pwd))
print(f"Status Code: {Result.status_code}, Message: {Result.text}") | [
"noreply@github.com"
] | noreply@github.com |
3ab6f6aff44401651bf7b863eb662e61d3e37e89 | 00d640cf9cfd50bf9c7952bdad781f63ee6ad1ab | /light control algorithms/light_control_functions.py | f43bc673e7bafabf3a2a92453361c0e219bea9f0 | [] | no_license | samdonnelly/Light-Control-Box | ec79490a0bef1c74fa5be84062a3fc9bd188797b | c8f2c7b4d081c896b9edbafeb1d5af579d219fb8 | refs/heads/main | 2023-09-02T13:21:59.455146 | 2021-11-10T05:27:21 | 2021-11-10T05:27:21 | 425,306,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | """
Author: Sam Donnelly
Project: Desktop Christmas Tree
Date Created: October 16, 2021
Date Last Modified: October 16, 2021
Purpose:
- Functions to support the light_control_algorithms.py script
"""
# ---------------------------------------------------------------------------
# Import Libraries
# ---------------------------------------------------------------------------
from math import sin, cos
# ---------------------------------------------------------------------------
# File Locations
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Global Variables
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def sine_func(A, B, C, D, x):
    """Evaluate the sinusoid A*sin(B*x - C) + D at the point x."""
    phase = B * x - C
    return A * sin(phase) + D
def cosine_func(A, B, C, D, x):
    """Evaluate the cosinusoid A*cos(B*x - C) + D at the point x."""
    phase = B * x - C
    return A * cos(phase) + D
| [
"samueldonnelly11@gmail.com"
] | samueldonnelly11@gmail.com |
ddc2c9668a4817787ff82b7eaead68fea1c20256 | 6cbd0925ae093de808966cf954049127b545476d | /CRM/URLS/permissions/urls.py | 294bde0c7b9d98ca022009f38d024911dd520d0e | [] | no_license | sauditore/FCRM | 7c788475f5425550359d3055386fdd8cebe8767d | 14851d828d58bf8d2dcd464176a1d6861249458a | refs/heads/master | 2021-05-22T13:21:09.835709 | 2020-04-05T06:55:31 | 2020-04-05T06:55:31 | 252,943,883 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | __author__ = 'Administrator'
from django.conf.urls import patterns, url
from CRM.Processors.Permission.FrmDeletePermission import delete_permission
from CRM.Processors.Permission.ShowPermissions import show_perms
from CRM.Processors.Permission.FrmCreatePermission import create_permission
from CRM.Processors.Permission.ShowPermissionTypes import show_permission_types
from CRM.Processors.Permission.FrmPermissionTypes import create_permission_type
from CRM.Processors.Permission.FrmViewAllPerms import show_all_perms
from CRM.Processors.Permission.FrmViewAllTypes import view_all_permission_types
from CRM.Processors.Permission.FrmDeletePermissionType import delete_permission_type
urlpatterns = patterns(
url(r'', delete_permission),
url(r'^show/$', show_perms, name='show_perms'),
url(r'^delete/$', delete_permission, name='delete_permission'),
url(r'^create/$', create_permission, name='create_permission'),
url(r'^show/types/$', show_permission_types, name='show_permission_types'),
url(r'^create/types/$', create_permission_type, name='create_permission_type'),
url(r'^show/all/$', show_all_perms, name='permission Management'),
url(r'^show/all/types/$', view_all_permission_types, name='permission type management'),
url(r'^delete/types/$', delete_permission_type, name='delete permission type')
) | [
"root@nobody.localdomain"
] | root@nobody.localdomain |
803331d02c81b15dd9eeeb88fb58de707d4c9897 | 287c663c97e7840239794fbe84ce285773b72985 | /virtual/bin/mako-render | ff06d7a97fb70f3f26a64dd2325bf6138e8c7d31 | [
"MIT"
] | permissive | mzazakeith/flask-blog | ea8e5b2da9a581eb026564c1b9e500fa0532ee88 | 2833404cc5e96ffdbfb767f35b9caf2bdcce7997 | refs/heads/master | 2020-03-21T21:24:57.296282 | 2018-07-02T20:20:24 | 2018-07-02T20:20:24 | 139,062,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/mzaza/Desktop/flask_blog/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
    # Auto-generated console-script shim for Mako: normalize argv[0]
    # (strip "-script.py(w)"/".exe" suffixes) and delegate to mako.cmd.cmdline.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
| [
"mzazakeith@gmail.com"
] | mzazakeith@gmail.com | |
a98066fa4f1f205b8d832995d43348b27b4b8bfa | 0b896bd19730a6b44990b921a11e361f4b1dff71 | /src/exceptions.py | 1c4ccf2627decd1ad83d7f6f8ff0a784e8bec471 | [] | no_license | sunarium/cs4920Project | fc527ffd52a0af3189820b4f817dffde86968089 | 0e879bece96198ea3883a499622d9afe0b360528 | refs/heads/master | 2020-08-06T10:11:54.383173 | 2019-11-25T07:10:51 | 2019-11-25T07:10:51 | 212,938,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | class IllegalPlayerActionError(Exception):
pass
class NetworkError(Exception):
pass | [
"m.yu@student.unsw.edu.au"
] | m.yu@student.unsw.edu.au |
19565566b932905e442427320118dae6bb6dc750 | 48f0dcb6209561a0ae9a41310afab3e723a05160 | /.config/qtile/default_config.py | 6eefa29a174c314d3d381d61285bf166444515d4 | [] | no_license | voreille/dotfiles | 6bd9709d8b5eeefd29fc04ef323c4a97a00e0a18 | fa2f452b10aaf0f124295c04dd564ee76af86474 | refs/heads/main | 2023-04-15T07:22:34.480569 | 2021-04-30T12:44:04 | 2021-04-30T12:44:04 | 340,158,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,428 | py | # Copyright (c) 2010 Aldo Cortesi
# Copyright (c) 2010, 2014 dequis
# Copyright (c) 2012 Randall Ma
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2013 horsik
# Copyright (c) 2013 Tao Sauvage
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import List # noqa: F401
from libqtile import bar, layout, widget
from libqtile.config import Click, Drag, Group, Key, Match, Screen
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
mod = "mod4"
terminal = guess_terminal()
keys = [
# Switch between windows
Key([mod], "h", lazy.layout.left(), desc="Move focus to left"),
Key([mod], "l", lazy.layout.right(), desc="Move focus to right"),
Key([mod], "j", lazy.layout.down(), desc="Move focus down"),
Key([mod], "k", lazy.layout.up(), desc="Move focus up"),
Key([mod], "space", lazy.layout.next(),
desc="Move window focus to other window"),
# Move windows between left/right columns or move up/down in current stack.
# Moving out of range in Columns layout will create new column.
Key([mod, "shift"], "h", lazy.layout.shuffle_left(),
desc="Move window to the left"),
Key([mod, "shift"], "l", lazy.layout.shuffle_right(),
desc="Move window to the right"),
Key([mod, "shift"], "j", lazy.layout.shuffle_down(),
desc="Move window down"),
Key([mod, "shift"], "k", lazy.layout.shuffle_up(), desc="Move window up"),
# Grow windows. If current window is on the edge of screen and direction
# will be to screen edge - window would shrink.
Key([mod, "control"], "h", lazy.layout.grow_left(),
desc="Grow window to the left"),
Key([mod, "control"], "l", lazy.layout.grow_right(),
desc="Grow window to the right"),
Key([mod, "control"], "j", lazy.layout.grow_down(),
desc="Grow window down"),
Key([mod, "control"], "k", lazy.layout.grow_up(), desc="Grow window up"),
Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"),
# Toggle between split and unsplit sides of stack.
# Split = all windows displayed
# Unsplit = 1 window displayed, like Max layout, but still with
# multiple stack panes
Key([mod, "shift"], "Return", lazy.layout.toggle_split(),
desc="Toggle between split and unsplit sides of stack"),
Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
# Toggle between different layouts as defined below
Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
Key([mod], "w", lazy.window.kill(), desc="Kill focused window"),
Key([mod, "control"], "r", lazy.restart(), desc="Restart Qtile"),
Key([mod, "control"], "q", lazy.shutdown(), desc="Shutdown Qtile"),
Key([mod], "r", lazy.spawncmd(),
desc="Spawn a command using a prompt widget"),
]
groups = [Group(i) for i in "123456789"]
for i in groups:
keys.extend([
# mod1 + letter of group = switch to group
Key([mod], i.name, lazy.group[i.name].toscreen(),
desc="Switch to group {}".format(i.name)),
# mod1 + shift + letter of group = switch to & move focused window to group
Key([mod, "shift"], i.name, lazy.window.togroup(i.name, switch_group=True),
desc="Switch to & move focused window to group {}".format(i.name)),
# Or, use below if you prefer not to switch to that group.
# # mod1 + shift + letter of group = move focused window to group
# Key([mod, "shift"], i.name, lazy.window.togroup(i.name),
# desc="move focused window to group {}".format(i.name)),
])
layouts = [
layout.Columns(border_focus_stack='#d75f5f'),
layout.Max(),
# Try more layouts by unleashing below layouts.
# layout.Stack(num_stacks=2),
# layout.Bsp(),
# layout.Matrix(),
# layout.MonadTall(),
# layout.MonadWide(),
# layout.RatioTile(),
# layout.Tile(),
# layout.TreeTab(),
# layout.VerticalTile(),
# layout.Zoomy(),
]
widget_defaults = dict(
font='sans',
fontsize=12,
padding=3,
)
extension_defaults = widget_defaults.copy()
screens = [
Screen(
bottom=bar.Bar(
[
widget.CurrentLayout(),
widget.GroupBox(),
widget.Prompt(),
widget.WindowName(),
widget.Chord(
chords_colors={
'launch': ("#ff0000", "#ffffff"),
},
name_transform=lambda name: name.upper(),
),
widget.TextBox("default config", name="default"),
widget.TextBox("Press <M-r> to spawn", foreground="#d75f5f"),
widget.Systray(),
widget.Clock(format='%Y-%m-%d %a %I:%M %p'),
widget.QuickExit(),
],
24,
),
),
]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
*layout.Floating.default_float_rules,
Match(wm_class='confirmreset'), # gitk
Match(wm_class='makebranch'), # gitk
Match(wm_class='maketag'), # gitk
Match(wm_class='ssh-askpass'), # ssh-askpass
Match(title='branchdialog'), # gitk
Match(title='pinentry'), # GPG key password entry
])
auto_fullscreen = True
focus_on_window_activation = "smart"
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
| [
"valentin.oreiller@gmail.com"
] | valentin.oreiller@gmail.com |
a794b38e5b1c9bc25dfef36a9d955d9cf54a7d8b | 2f0aa66e14c6595289f6a0de2bdf71e9922052a7 | /nextApi/user/migrations/0003_auto_20200818_2008.py | 6d6e0a8b8d3ab39d197ff070024c08b0dd3e56ff | [] | no_license | aimethierry/NextApi | 8f83a2b0f499fdf5118eb930baa051584cfd9aa5 | 90884ee6d900ce71116b40276dda0e97bec0b521 | refs/heads/master | 2022-12-11T09:03:54.981284 | 2020-09-19T12:40:36 | 2020-09-19T12:40:36 | 296,866,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | # Generated by Django 3.1 on 2020-08-18 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_companyacc'),
]
operations = [
migrations.AddField(
model_name='companyacc',
name='email',
field=models.CharField(blank=True, max_length=120, null=True),
),
migrations.AddField(
model_name='companyacc',
name='password',
field=models.CharField(blank=True, max_length=120, null=True),
),
migrations.AddField(
model_name='companyacc',
name='usesrname',
field=models.CharField(blank=True, max_length=120, null=True),
),
migrations.AlterField(
model_name='companyacc',
name='company',
field=models.CharField(blank=True, max_length=120, null=True),
),
]
| [
"aime.thierry97@gmail.com"
] | aime.thierry97@gmail.com |
2067d0e3fe17457409fb06a1f32336009d091972 | 34f45b51f8d115bc183748b81b8dbea3e398eede | /case/macroit.py | 0ef0fcad8c2a6af5f093ba86be3e5e8389c4806d | [] | no_license | MaxMorais/Code2Case | 5e2d33fab907dee93600f7792904da0a0a0e7d4c | 2c4137db8c7cdb9ec398900c7c42de8281900ace | refs/heads/master | 2021-01-01T16:50:37.103541 | 2013-10-18T14:07:58 | 2013-10-18T14:07:58 | 5,938,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,911 | py | #!/usr/bin/python
"""
Interpreter for Python functions: a small CPython bytecode interpreter.
* Does not support exceptions.
"""
import dis
import new
def get_cell_value(cell):
    """Return the value currently stored in a closure *cell* object.

    Uses the cell's ``cell_contents`` attribute when available; otherwise
    falls back to rebuilding a function whose closure is ``(cell,)`` and
    calling it, so the value is recovered through ordinary closure lookup.
    """
    try:
        return cell.cell_contents
    except AttributeError:
        pass
    # Fallback for interpreters without cell_contents: borrow the code object
    # of a one-free-variable closure and graft our cell onto it.
    def make_closure_that_returns_value(use_this_value):
        def closure_that_returns_value():
            return use_this_value
        return closure_that_returns_value
    dummy_function = make_closure_that_returns_value(0)
    dummy_function_code = dummy_function.func_code
    our_function = new.function(dummy_function_code, {}, None, None, (cell,))
    return our_function()
def unaryoperation(func):
    """Build a frame method that pops one operand and pushes func(operand)."""
    def ufunction(self):
        operand = self.stack.pop()
        self.stack.append(func(operand))
    return ufunction
def binaryoperation(func):
    """Build a frame method that pops rhs, then lhs, and pushes func(lhs, rhs)."""
    def bfunction(self):
        rhs = self.stack.pop()
        lhs = self.stack.pop()
        self.stack.append(func(lhs, rhs))
    return bfunction
def binline(func,attr):
    """Build a frame method for an in-place opcode: pop rhs, then lhs, apply
    the operator, and push the result.

    Prefers lhs's *attr* special method when present, falling back to *func*
    (the generic operator) otherwise.
    """
    def bfunction(self):
        w_2 = self.stack.pop()
        w_1 = self.stack.pop()
        method = getattr(w_1, attr, None)
        if method is not None:
            result = method(w_2)
            # bug fix: special methods return NotImplemented for unsupported
            # operand types (e.g. int.__add__(1, 2.5)); the original pushed
            # that sentinel onto the stack.  Fall back to the generic
            # operator, which performs full coercion / reflected dispatch.
            if result is NotImplemented:
                result = func(w_1, w_2)
        else:
            result = func(w_1, w_2)
        self.stack.append(result)
    return bfunction
class FuncInterpreter(object):
def __init__(self,func):
assert isinstance(func,new.function)
self.func = func
self.pycode = func.func_code
self.stack = []
self.locals = {}
self.globals = func.func_globals
self.blockstack = []
self.clovars = []
if func.func_closure:
for cell in func.func_closure:
self.clovars.append(get_cell_value(cell))
def run(self):
self.next_instr = 0
while not self.dispatch():
pass
def dispatch(self):
opcode = self.nextop()
try:
fn = getattr(self,dis.opname[opcode])
except AttributeError:
raise NotImplementedError('Unsupported opcode: %s' % dis.opname[opcode])
if opcode >= dis.HAVE_ARGUMENT:
oparg = self.nextarg()
return fn(oparg)
else:
return fn()
def nextop(self):
c = self.pycode.co_code[self.next_instr]
self.next_instr += 1
return ord(c)
def nextarg(self):
lo = self.nextop()
hi = self.nextop()
return (hi<<8) + lo
    ### accessor functions ###
    def getlocalvarname(self, index):
        # Name of the local-variable slot *index* (co_varnames order).
        return self.pycode.co_varnames[index]
    def getconstant(self, index):
        # Constant at *index* in the code object's co_consts tuple.
        return self.pycode.co_consts[index]
    def getname(self, index):
        # Name at *index* in co_names (used by STORE_NAME / DELETE_NAME).
        return self.pycode.co_names[index]
def NOP(f):
pass
def LOAD_DEREF(self,varindex):
self.stack.append(self.clovars[varindex])
def LOAD_FAST(self, varindex):
varname = self.getlocalvarname(varindex)
self.stack.append(self.locals[varname])
def LOAD_CONST(self, constindex):
w_const = self.getconstant(constindex)
self.stack.append(w_const)
def STORE_FAST(self, varindex):
varname = self.getlocalvarname(varindex)
w_newvalue = self.stack.pop()
self.locals[varname] = w_newvalue
def POP_TOP(self):
if self.condstack:
self.condstack.pop()
self.stack.pop()
def ROT_TWO(self):
w_1 = self.stack.pop()
w_2 = self.stack.pop()
self.stack.append(w_1)
self.stack.append(w_2)
def ROT_THREE(self):
w_1 = self.stack.pop()
w_2 = self.stack.pop()
w_3 = self.stack.pop()
self.stack.append(w_1)
self.stack.append(w_3)
self.stack.append(w_2)
def ROT_FOUR(self):
w_1 = self.stack.pop()
w_2 = self.stack.pop()
w_3 = self.stack.pop()
w_4 = self.stack.pop()
self.stack.append(w_1)
self.stack.append(w_4)
self.stack.append(w_3)
self.stack.append(w_2)
def DUP_TOP(self):
w_1 = self.stack[-1]
self.stack.append(w_1)
def DUP_TOPX(f, itemcount):
assert 1 <= itemcount <= 5, "limitation of the current interpreter"
self.stack.extend(self.stack[-itemcount:])
UNARY_POSITIVE = unaryoperation(lambda x:+x)
UNARY_NEGATIVE = unaryoperation(lambda x:-x)
UNARY_NOT = unaryoperation(lambda x:not x)
UNARY_CONVERT = unaryoperation(lambda x:repr(x))
UNARY_INVERT = unaryoperation(lambda x:~x)
BINARY_POWER = binaryoperation(lambda x,y:x**y)
BINARY_MULTIPLY = binaryoperation(lambda x,y:x*y)
BINARY_TRUE_DIVIDE = binaryoperation(lambda x,y:x/y)
BINARY_FLOOR_DIVIDE = binaryoperation(lambda x,y:x//y)
BINARY_DIVIDE = binaryoperation(lambda x,y:x/y)
BINARY_MODULO = binaryoperation(lambda x,y:x%y)
BINARY_ADD = binaryoperation(lambda x,y:x+y)
BINARY_SUBTRACT = binaryoperation(lambda x,y:x-y)
BINARY_SUBSCR = binaryoperation(lambda x,y:x[y])
BINARY_LSHIFT = binaryoperation(lambda x,y:x<<y)
BINARY_RSHIFT = binaryoperation(lambda x,y:x>>y)
BINARY_AND = binaryoperation(lambda x,y:x&y)
BINARY_XOR = binaryoperation(lambda x,y:x^y)
BINARY_OR = binaryoperation(lambda x,y:x|y)
INPLACE_POWER = binline(lambda x,y:x**y,'__pow__')
INPLACE_MULTIPLY = binline(lambda x,y:x*y,'__mul__')
INPLACE_TRUE_DIVIDE = binline(lambda x,y:x/y,'__truediv__')
INPLACE_FLOOR_DIVIDE = binline(lambda x,y:x//y,'__floordiv__')
INPLACE_DIVIDE = binline(lambda x,y:x/y,'__div__')
INPLACE_MODULO = binline(lambda x,y:x%y,'__mod__')
INPLACE_ADD = binline(lambda x,y:x+y,'__add__')
INPLACE_SUBTRACT = binline(lambda x,y:x-y,'__sub__')
INPLACE_LSHIFT = binline(lambda x,y:x<<y,'__lshift__')
INPLACE_RSHIFT = binline(lambda x,y:x>>y,'__rshift__')
INPLACE_AND = binline(lambda x,y:x&y,'__and__')
INPLACE_XOR = binline(lambda x,y:x^y,'__xor__')
INPLACE_OR = binline(lambda x,y:x|y,'__or__')
def slice(f, w_start, w_end):
w_obj = self.stack.pop()
self.stack.append(w_obj[w_start:w_end])
def SLICE_0(self):
w_obj = self.stack.pop()
self.stack.append(w_obj[:])
def SLICE_1(self):
w_start = self.stack.pop()
self.stack.append(w_obj[w_start:])
def SLICE_2(self):
w_end = self.stack.pop()
self.stack.append(w_obj[:w_end])
def SLICE_3(self):
w_end = self.stack.pop()
w_start = self.stack.pop()
self.stack.append(w_obj[w_start:w_end])
def storeslice(self, w_start, w_end):
w_obj = self.stack.pop()
w_newvalue = self.stack.pop()
w_obj[w_start:w_end] = w_newvalue
def STORE_SLICE_0(self):
self.storeslice(None,None)
def STORE_SLICE_1(self):
w_start = self.stack.pop()
self.storeslice(w_start,None)
def STORE_SLICE_2(self):
w_end = self.stack.pop()
self.storeslice(None,w_end)
def STORE_SLICE_3(self):
w_end = self.stack.pop()
w_start = self.stack.pop()
self.storeslice(w_start, w_end)
def deleteslice(f, w_start, w_end):
w_obj = self.stack.pop()
del w_obj[w_start:w_end]
def DELETE_SLICE_0(self):
self.deleteslice(f.space.w_None, f.space.w_None)
def DELETE_SLICE_1(self):
w_start = self.stack.pop()
self.deleteslice(w_start, None)
def DELETE_SLICE_2(self):
w_end = self.stack.pop()
self.deleteslice(None, w_end)
def DELETE_SLICE_3(self):
w_end = self.stack.pop()
w_start = self.stack.pop()
self.deleteslice(w_start, w_end)
def STORE_SUBSCR(self):
"obj[subscr] = newvalue"
w_subscr = self.stack.pop()
w_obj = self.stack.pop()
w_newvalue = self.stack.pop()
f.space.setitem(w_obj, w_subscr, w_newvalue)
def DELETE_SUBSCR(self):
"del obj[subscr]"
w_subscr = self.stack.pop()
w_obj = self.stack.pop()
del w_obj[w_subscr]
def PRINT_EXPR(self):
w_expr = self.stack.pop()
print w_expr
def PRINT_ITEM_TO(self):
w_stream = self.stack.pop()
w_item = self.stack.pop()
if w_stream == None:
print w_item,
print w_item >> w_stream
def PRINT_ITEM(self):
w_item = self.stack.pop()
print w_item,
def PRINT_NEWLINE_TO(self):
w_stream = self.stack.pop()
if w_stream == None:
print
print >> w_stream
def PRINT_NEWLINE(self):
print
def RETURN_VALUE(self):
w_returnvalue = self.stack.pop()
return 1,w_returnvalue
def STORE_NAME(self, varindex):
w_varname = self.getname(varindex)
w_newvalue = self.stack.pop()
self.locals[w_varname] = w_newvalue
def DELETE_NAME(self, varindex):
w_varname = self.getname(varindex)
del self.locals[w_varname]
def UNPACK_SEQUENCE(self, itemcount):
w_iterable = self.stack.pop()
items = list(w_iterable)
items.reverse()
for item in items:
self.stack.append(item)
def STORE_ATTR(self, nameindex):
"obj.attributename = newvalue"
w_attributename = self.getname(nameindex)
w_obj = self.stack.pop()
w_newvalue = self.stack.pop()
setattr(w_obj,w_attributename,w_newvalue)
def DELETE_ATTR(self, nameindex):
"del obj.attributename"
w_attributename = self.getname(nameindex)
w_obj = self.stack.pop()
delattr(w_obj, w_attributename)
def STORE_GLOBAL(self, nameindex):
w_varname = self.getname(nameindex)
w_newvalue = self.stack.pop()
f.space.setitem(f.w_globals, w_varname, w_newvalue)
def DELETE_GLOBAL(self, nameindex):
w_varname = self.getname(nameindex)
f.space.delitem(f.w_globals, w_varname)
def LOAD_NAME(self, nameindex):
w_varname = self.getname(nameindex)
try:
w_value = self.locals[w_varname]
except KeyError:
pass
f.LOAD_GLOBAL(nameindex) # fall-back
def LOAD_GLOBAL(self, nameindex):
w_varname = self.getname(nameindex)
if self.globals.has_key(w_varname):
self.stack.append(self.globals[w_varname])
else:
self.stack.append(__builtins__[w_varname])
def DELETE_FAST(self, varindex):
varname = f.getlocalvarname(varindex)
del self.locals[varname]
def BUILD_TUPLE(self, itemcount):
items = [self.stack.pop() for i in range(itemcount)]
items.reverse()
w_tuple = tuple(items)
self.stack.append(w_tuple)
def BUILD_LIST(self, itemcount):
items = [self.stack.pop() for i in range(itemcount)]
items.reverse()
self.stack.append(items)
def BUILD_MAP(self, zero):
if zero != 0:
raise pyframe.BytecodeCorruption
self.stack.append(dict())
def LOAD_ATTR(self, nameindex):
"obj.attributename"
w_attributename = self.getname(nameindex)
w_obj = self.stack.pop()
w_value = getattr(w_obj, w_attributename)
self.stack.append(w_value)
def cmp_lt(w_1, w_2): return w_1 < w_2
def cmp_le(w_1, w_2): return w_1 <= w_2
def cmp_eq(w_1, w_2): return w_1 == w_2
def cmp_ne(w_1, w_2): return w_1 != w_2
def cmp_gt(w_1, w_2): return w_1 > w_2
def cmp_ge(w_1, w_2): return w_1 >= w_2
def cmp_in(w_1, w_2):
return w_1 in w_2
def cmp_not_in(w_1, w_2):
return w_1 not in w_2
def cmp_is(w_1, w_2):
return w_1 is w_2
def cmp_is_not(w_1, w_2):
return w_1 is not w_2
compare_dispatch_table = {
0: cmp_lt, # "<"
1: cmp_le, # "<="
2: cmp_eq, # "=="
3: cmp_ne, # "!="
4: cmp_gt, # ">"
5: cmp_ge, # ">="
6: cmp_in,
7: cmp_not_in,
8: cmp_is,
9: cmp_is_not,
}
def COMPARE_OP(self, testnum):
w_2 = self.stack.pop()
w_1 = self.stack.pop()
try:
testfn = self.compare_dispatch_table[testnum]
except KeyError:
raise pyframe.BytecodeCorruption, "bad COMPARE_OP oparg"
w_result = testfn(w_1, w_2)
self.stack.append(w_result)
def JUMP_FORWARD(self, stepby):
self.next_instr += stepby
def JUMP_IF_FALSE(self, stepby):
w_cond = self.stack[-1]
if not w_cond:
self.next_instr += stepby
def JUMP_IF_TRUE(self, stepby):
w_cond = self.stack[-1]
if w_cond:
self.next_instr += stepby
def JUMP_ABSOLUTE(self, jumpto):
self.next_instr = jumpto
def call_function(self, oparg, w_star=None, w_starstar=None):
n_arguments = oparg & 0xff
n_keywords = (oparg>>8) & 0xff
keywords = {}
if n_keywords:
for i in range(n_keywords):
w_value = self.stack.pop()
w_key = self.stack.pop()
key = str(w_key)
keywords[key] = w_value
arguments = [self.stack.pop() for i in range(n_arguments)]
arguments.reverse()
w_function = self.stack.pop()
w_result = w_function(*arguments,**keywords)
self.stack.append(w_result)
def CALL_FUNCTION(self, oparg):
self.call_function(oparg)
def CALL_FUNCTION_VAR(self, oparg):
w_varargs = self.stack.pop()
self.call_function(oparg, w_varargs)
def CALL_FUNCTION_KW(self, oparg):
w_varkw = self.stack.pop()
self.call_function(oparg, None, w_varkw)
def CALL_FUNCTION_VAR_KW(self, oparg):
w_varkw = self.stack.pop()
w_varargs = self.stack.pop()
self.call_function(oparg, w_varargs, w_varkw)
def BUILD_SLICE(self, numargs):
if numargs == 3:
w_step = self.stack.pop()
elif numargs == 2:
w_step = None
else:
raise pyframe.BytecodeCorruption
w_end = self.stack.pop()
w_start = self.stack.pop()
w_slice = slice(w_start, w_end, w_step)
self.stack.append(w_slice)
def LIST_APPEND(self):
w = self.stack.pop()
v = self.stack.pop()
v.append(w)
def SET_LINENO(self, lineno):
pass
def POP_BLOCK(self):
self.blockstack.pop()
def SETUP_LOOP(self, offsettoend):
self.blockstack.append(self.next_instr + offsettoend)
def FOR_ITER(self, jumpby):
w_iterator = self.stack[-1]
try:
w_nextitem = w_iterator.next()
except StopIteration:
self.next_instr += jumpby
else:
self.stack.append(w_nextitem)
def GET_ITER(self):
w_iterable = self.stack.pop()
w_iterator = iter(w_iterable)
self.stack.append(w_iterator)
def BREAK_LOOP(f):
self.next_instr = self.blockstack.pop()
| [
"max.morais.dmm@gmail.com"
] | max.morais.dmm@gmail.com |
edf2ee5f33b2c47b39839784db6d3eeb8d864c12 | a8c41ff849b1048d0ac553b8c1ef2fdd9af6eb0d | /bspwm/bspwm/scripts/bctl | b75a1f073ff63499e9ec976c35c1e61460112493 | [] | no_license | sohamb117/YukinoConf | 5e950f407744b7891053bff8dc49a3eaf270fe4e | 14d9ec55a700345267e1c87c81da9d8fa1e884e5 | refs/heads/master | 2023-03-06T21:00:39.391107 | 2021-02-19T18:51:44 | 2021-02-19T18:51:44 | 340,459,366 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,337 | #!/usr/bin/env python3
import subprocess
import os
import time
pwr = subprocess.check_output("bluetoothctl show", shell=True)
pwr = 'Powered: yes' in pwr.decode("utf-8")
if pwr:
dl = subprocess.check_output("bluetoothctl paired-devices", shell=True)
dl = list(dl.decode("utf-8").strip().split('\n'))
ds = 'Turn Off Adapter|Pair New Device|'
for i in dl:
ds += i[25:] + '|'
ds += 'Cancel'
d = subprocess.check_output(f'rofi -sep "|" -dmenu -p "" -i -font "Iosevka Nerd Font 9" -location 3 -columns 1 -xoffset -12 -yoffset 62 -width 10 -hide-scrollbar -line-padding 4 -padding 20 -lines 4 <<< "{ds}"', shell=True)
if d.decode("utf-8").strip() == "Turn Off Adapter":
os.system("bluetoothctl power off")
elif d.decode("utf-8").strip() == "Cancel":
pass
elif d.decode("utf-8").strip() == "Pair New Device":
os.system("bluetoothctl scan on &")
os.system("bluetoothctl agent on &")
time.sleep(2)
dp = subprocess.check_output("bluetoothctl devices", shell=True)
dp = list(dp.decode("utf-8").strip().split("\n"))
ps = ""
for i in dp:
if i not in dl:
ps += i[25:] + '|'
ps += "Cancel"
o = subprocess.check_output(f'rofi -sep "|" -dmenu -i -p "" -font "Iosevka Nerd Font 9" -location 3 -columns 1 -xoffset -12 -yoffset 62 -width 10 -hide-scrollbar -line-padding 4 -padding 20 -lines 4 <<< "{ps}"', shell=True)
if o.decode("utf-8").strip() =='Cancel':
pass
else:
for i in dp:
if(o.decode("utf-8").strip() in i):
wp = i
fa = wp[7:24]
os.system(f"bluetoothctl pair {fa}")
os.system("bluetoothctl agent off")
os.system("bluetoothctl scan off")
else:
for i in dl:
if(d.decode("utf-8").strip() in i):
wd = i
fa = wd[7:24]
os.system(f"bluetoothctl connect {fa}")
else:
o = subprocess.check_output('rofi -sep "|" -dmenu -i -p "" -font "Iosevka Nerd Font 9" -location 3 -columns 1 -xoffset -12 -yoffset 62 -width 10 -hide-scrollbar -line-padding 4 -padding 20 -lines 4 <<< "Turn On Adapter|Cancel"', shell=True)
if o.decode("utf-8").strip() == "Turn On Adapter":
os.system('bluetoothctl power on')
| [
"sohamb117@gmail.com"
] | sohamb117@gmail.com | |
d205eeabe1230372e52454c55429cccf3659b362 | 614cad3588af9c0e51e0bb98963075e3195e92f5 | /utils/completeness.py | bd6b0845fa36983abbad225f1ed473385db12e64 | [] | no_license | dragonlong/haoi-pose | 2810dae7f9afd0a26b3d0a5962fd9ae8a5abac58 | 43388efd911feecde588b27a753de353b8e28265 | refs/heads/master | 2023-07-01T14:18:29.029484 | 2021-08-10T10:57:42 | 2021-08-10T10:57:42 | 294,602,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,738 | py | import argparse
import os
import torch
import numpy as np
from scipy.spatial import cKDTree as KDTree
import trimesh
import glob
from joblib import Parallel, delayed
def directed_hausdorff(point_cloud1:torch.Tensor, point_cloud2:torch.Tensor, reduce_mean=True):
"""
:param point_cloud1: (B, 3, N)
:param point_cloud2: (B, 3, M)
:return: directed hausdorff distance, A -> B
"""
n_pts1 = point_cloud1.shape[2]
n_pts2 = point_cloud2.shape[2]
pc1 = point_cloud1.unsqueeze(3)
pc1 = pc1.repeat((1, 1, 1, n_pts2)) # (B, 3, N, M)
pc2 = point_cloud2.unsqueeze(2)
pc2 = pc2.repeat((1, 1, n_pts1, 1)) # (B, 3, N, M)
l2_dist = torch.sqrt(torch.sum((pc1 - pc2) ** 2, dim=1)) # (B, N, M)
shortest_dist, _ = torch.min(l2_dist, dim=2)
hausdorff_dist, _ = torch.max(shortest_dist, dim=1) # (B, )
if reduce_mean:
hausdorff_dist = torch.mean(hausdorff_dist)
return hausdorff_dist
def nn_distance(query_points, ref_points):
ref_points_kd_tree = KDTree(ref_points)
one_distances, one_vertex_ids = ref_points_kd_tree.query(query_points)
return one_distances
def completeness(query_points, ref_points, thres=0.03):
a2b_nn_distance = nn_distance(query_points, ref_points)
percentage = np.sum(a2b_nn_distance < thres) / len(a2b_nn_distance)
return percentage
def process_one(shape_dir):
# load generated shape
pc_paths = glob.glob(os.path.join(shape_dir, "fake-z*.ply"))
pc_paths = sorted(pc_paths)
gen_pcs = []
for path in pc_paths:
sample_pts = trimesh.load(path)
sample_pts = np.asarray(sample_pts.vertices)
# sample_pts = torch.tensor(sample_pts.vertices).transpose(1, 0)
gen_pcs.append(sample_pts)
# load partial input
partial_path = os.path.join(shape_dir, "raw.ply")
partial_pc = trimesh.load(partial_path)
partial_pc = np.asarray(partial_pc.vertices)
# partial_pc = torch.tensor(partial_pc.vertices).transpose(1, 0)
# completeness percentage
gen_comp = 0
for sample_pts in gen_pcs:
comp = completeness(partial_pc, sample_pts)
gen_comp += comp
gen_comp = gen_comp / len(gen_pcs)
# unidirectional hausdorff
gen_pcs = [torch.tensor(pc).transpose(1, 0) for pc in gen_pcs]
gen_pcs = torch.stack(gen_pcs, dim=0)
partial_pc = torch.tensor(partial_pc).transpose(1, 0)
partial_pc = partial_pc.unsqueeze(0).repeat((gen_pcs.size(0), 1, 1))
hausdorff = directed_hausdorff(partial_pc, gen_pcs, reduce_mean=True).item()
return gen_comp, hausdorff
def func(args):
shape_names = sorted(os.listdir(args.src))
all_shape_dir = [os.path.join(args.src, name) for name in shape_names]
results = Parallel(n_jobs=args.process, verbose=2)(delayed(process_one)(path) for path in all_shape_dir)
res_comp, res_hausdorff = zip(*results)
res_comp = np.mean(res_comp)
res_hausdorff = np.mean(res_hausdorff)
return res_hausdorff, res_comp
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--src", type=str)
parser.add_argument("-p", "--process", type=int, default=10)
parser.add_argument("-o", "--output", type=str)
args = parser.parse_args()
if args.output is None:
args.output = args.src + '-eval_UHD.txt'
res_hausdorff, res_comp = func(args)
print("Avg Unidirectional Hausdorff Distance: {}".format(res_hausdorff))
print("Avg Completeness: {}".format(res_comp))
with open(args.output, "a") as fp:
fp.write("SRC: {}\n".format(args.src))
fp.write("Avg Unidirectional Hausdorff Distance: {}\n".format(res_hausdorff))
fp.write("Avg Completeness: {}\n".format(res_comp))
if __name__ == '__main__':
main() | [
"lxiaol9@vt.edu"
] | lxiaol9@vt.edu |
c1f1d4db23e064ffee1b6dbda44258ba6713a638 | b246fc0d9f51cdcdf9f539635aaecf54a238eb5c | /py_src/mltk_qualia/task_config.py | a927de1eaa078d18a8897405a44b61c34da8be86 | [] | no_license | yv/MLTK_Qualia | 0168227f9747e13bb7265453b98a09543f551ce1 | 05a77f38295b53ce0fa83697a19607f28ad035dd | refs/heads/master | 2020-07-08T10:48:34.631353 | 2013-09-13T14:29:25 | 2013-09-13T14:29:25 | 203,649,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,291 | py | import codecs
import re
import optparse
import sys
from collections import defaultdict
from alphabet import CPPUniAlphabet
from pcfg_site_config import get_config_var
from pynlp.de.smor_pos import get_morphs
__all__=['all_tasks','get_task_pos_pairs','get_task_pos_tags',
'get_pair_alphs_by_pos', 'get_word_alphs_by_pos',
'forward_mapping_by_pos','Dataset','get_dataset']
all_tasks=[]
#['Noun-Associations_gold_forCluto.csv','NV','qualia','DE'],
# ['rektionskomposita.txt','NV','rkomp','DE']]
for lang, conf in get_config_var('dist_sim').iteritems():
assert len(lang)==2, lang
for k, dat in conf['datasets'].iteritems():
all_tasks.append([k,lang,''.join(dat['postags'])])
def get_task_pos_pairs(language=None, task_name=None):
pairs=set()
for name, lang, pospair in all_tasks:
if ((language is None or lang==language) and
(task_name is None or name==task_name)):
pairs.add(pospair)
return sorted(pairs)
def get_task_pos_tags(language=None, task_name=None):
tags=set()
for name, lang, pospair in all_tasks:
if ((language is None or lang==language) and
(task_name is None or name==task_name)):
tags.update(pospair)
return tags
class FilePatternDict(dict):
def __init__(self, pat, want_utf8=True):
self.pat=pat
self.want_utf8=want_utf8
def __missing__(self, k):
fname=self.pat%{'pos_tag':k}
alph=CPPUniAlphabet(want_utf8=self.want_utf8)
print >>sys.stderr, "[FilePatternDict] load %s"%(fname,)
alph.fromfile_utf8(file(fname))
alph.growing=False
self[k]=alph
return alph
pair_alph_cache={}
def get_pair_alphs_by_pos(language):
if language in pair_alph_cache:
return pair_alph_cache[language]
else:
pair_alph_cache[language]=value=FilePatternDict(get_config_var('dist_sim.$lang.pair_alph_pattern',{'lang':language}))
return value
word_alph_cache={}
def get_word_alphs_by_pos(language):
if language in word_alph_cache:
return word_alph_cache[language]
else:
word_alph_cache[language]=value=FilePatternDict(get_config_var('dist_sim.$lang.word_alph_pattern',{'lang':language}))
return value
_pattern_res={}
def get_regex_for_pattern(pattern):
if pattern in _pattern_res:
return _pattern_res[pattern]
else:
rx=re.compile(pattern.replace('W','(\S+)').replace('L','(\w+)'))
_pattern_res[pattern]=rx
return rx
class Dataset:
def __init__(self, name, conf, lang):
self.lang=lang
self.name=name
self.postags=''.join(conf['postags'])
pat=get_regex_for_pattern(conf['pattern'])
data=[]
labels=[]
for l in codecs.open(conf['path'],'r','UTF-8'):
m=pat.match(l)
if not m:
print >>sys.stderr, "Non-matching line:",l
else:
data.append(m.groups()[:-1])
labels.append([m.groups()[-1]])
self.data=data
self.labels=labels
def load_alphabet(self, key=None):
'''
retrieves the alphabet for word1(0) or word2(1)
or the pairs(None)
'''
if key is None:
return get_pair_alphs_by_pos(self.lang)[''.join(self.postags)]
else:
return get_word_alphs_by_pos(self.lang)[self.postags[key]]
def check_alphabets(self):
ok=True
alph=self.load_alphabet(None)
for dat in self.data:
try:
alph['_'.join(dat)]
except KeyError:
print >>sys.stderr, "missing pair:", '_'.join(dat)
ok=False
for i,p in enumerate(self.postags):
alph=self.load_alphabet(i)
for dat in self.data:
try:
alph[dat[i]]
except KeyError:
print >>sys.stderr, "missing word:", dat[i]
ok=False
return ok
def add_to_vocabulary(self, item_sets):
p=''.join(self.postags)
items=item_sets[p]
for dat in self.data:
items.add('_'.join(dat))
for i,p in enumerate(self.postags):
items=item_sets[p]
for dat in self.data:
items.add(dat[i])
def get_dataset(name, lang=None):
'''
retrieves a dataset with the matching name from the
configuration.
'''
for lang0, conf in get_config_var('dist_sim').iteritems():
if lang is not None and lang0 != lang:
continue
if name in conf['datasets']:
return Dataset(name, conf['datasets'][name], lang0)
raise KeyError(name)
_variants_cache={}
def get_variants_by_pos(language, pos_tag):
"""Returns latin1-encoded (baseform, tb-lemma) strings that
can occur in the tb_lemma attribute"""
if pos_tag != 'V':
return None
if pos_tag in _variants_cache:
return _variants_cache[language+pos_tag]
variants=[]
alph_v=get_word_alphs_by_pos(False)['V']
for i in xrange(len(alph_v)):
w=alph_v.get_sym(i)
morphs=get_morphs(w.replace('#',''),'VVINF')
for m,l,a in morphs:
if len(morphs)>1 or l!=w:
variants.append((w,l))
_variants_cache[language+pos_tag]=variants
return variants
def forward_mapping_by_pos(language, pos_tag):
var=get_variants_by_pos(language, pos_tag)
if var is None:
return None
mapping={}
for (k,v) in var:
if k not in mapping:
lst=[]
mapping[k]=lst
else:
lst=mapping[k]
if v not in lst:
lst.append(v)
return mapping
def compile_alphabets(language, suffix='', wanted_alphs=None):
pair_pat=get_config_var('dist_sim.$lang.pair_alph_pattern',{'lang':language})
word_pat=get_config_var('dist_sim.$lang.word_alph_pattern',{'lang':language})
wanted_words=defaultdict(set)
conf=get_config_var('dist_sim.'+language)
for name,cf in conf['datasets'].iteritems():
print >>sys.stderr, language, name
dat=Dataset(name,cf,language)
dat.add_to_vocabulary(wanted_words)
print >>sys.stderr, "Saving",
for k,v in wanted_words.iteritems():
if wanted_alphs is not None and k not in wanted_alphs:
continue
print >>sys.stderr, k,
if len(k)==1:
fname=word_pat%{'pos_tag':k}+suffix
else:
fname=pair_pat%{'pos_tag':k}+suffix
alph=CPPUniAlphabet(want_utf8=True)
for word in v:
alph[word]
alph.tofile(file(fname,'w'))
print >>sys.stderr
oparse=optparse.OptionParser()
oparse.add_option('--lang', dest='language',
help='compile alphabets for these languages')
oparse.add_option('--suffix', dest='suffix', default='',
help='suffix to append to alphabet filenames')
def compile_alphabets_main():
opts, args=oparse.parse_args()
if args:
wanted=args
else:
wanted=None
if opts.language is None:
languages=sorted(set([x[1] for x in all_tasks]))
else:
languages=[opts.language]
for lang in languages:
compile_alphabets(lang, opts.suffix, wanted)
| [
"versley@sfs.uni-tuebingen.de"
] | versley@sfs.uni-tuebingen.de |
cd933faabaaa6dca60203eed2ccdf2f8d8771302 | b756852d989065c180d48b8b8d084eabc6abb696 | /tp01/ex02-missionaries-cannibals/ex02_c_iddfs.py | 1b720852298884faecc14284c252f0c0a8e2a81a | [] | no_license | goncaloacteixeira/feup-iart | 02ab7d5827db8fb59336e848bc07cec97083d6a1 | e53ce8319d201d55c614596d70e356c642f4c13e | refs/heads/master | 2023-08-23T08:55:02.116116 | 2021-02-27T02:24:35 | 2021-02-27T02:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | from State import *
from Utils import *
# Iterative Deepening Depth First Search
def iddfs(state: State, goal: tuple = (0, 0, -1), max_depth: int = 11):
current_max_depth = 1
while current_max_depth != max_depth:
root = Node(state)
stack = [root]
tree = Tree(root)
# if the stack is empty we also want to terminate the loop as no more nodes are expandable
while len(stack) != 0:
# popping the first node on the stack (current highest depth node)
node = stack.pop(0)
# here we expand said node
expanded = expand(node.info)
# if the depth is not already higher than the current max allowed depth
if node.depth <= current_max_depth:
# for each expanded node we want to add it to the tree, and insert it on the stack so on
# the next loop we start by the highest depth nodes
for x in expanded:
new_node = Node(x)
tree.add_node(new_node)
node.add_edge(new_node, 1)
stack.insert(0, new_node)
solution = contains_goal(stack, goal)
if solution is not None:
print("Found Solution, Depth", solution.depth)
return solution, tree
current_max_depth += 1
return None, None
if __name__ == "__main__":
print("---- IDDFS -----")
node, tree = iddfs(State((3, 3, 1), (3, 3, 1), "Start"))
if node is not None:
print("- Tree -")
tree.print_tree()
path = find_path(node)
print("- Solution -")
print_path(path)
else:
print("- No Solution -")
| [
"goncalo.ac.teixeira@gmail.com"
] | goncalo.ac.teixeira@gmail.com |
bcc75cc2bb8799b57c6a1917372e53bda74144e0 | a43e24eca5ec318c6e32349541e34ae8dda79eda | /eCom/models.py | 94f86d1b196c10babdfc81106e911b33ae7fe711 | [] | no_license | Aziz-T/django-ile-E-Ticaret-Sitesi-projesi--E-Commerce-Web-Site-with-django | 374f14f2b47881876db90235ee2ea5a064918f48 | cef5e2da46d7b5bc2abbc3d642797518d1b82c33 | refs/heads/master | 2023-03-27T17:05:31.152851 | 2021-03-30T12:19:03 | 2021-03-30T12:19:03 | 321,477,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | from django.db import models
# Create your models here.
class Customer(models.Model):
name = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200,null=True)
class Product(models.Model):
name= models.CharField(max_length=200,null=True)
price=models.FloatField()
digital = models.BooleanField(default=False,null=True,blank=False)
class Order(models.Model):
customer=models.ForeignKey(Customer,on_delete=models.SET_NULL,blank=True,null=True)
date_order=models.DateTimeField(auto_now_add=True)
complete=models.BooleanField(default=False,null=True,blank=False)
transaction_id=models.CharField(max_length=200,null=True)
class OrderItem(models.Model):
product=models.ForeignKey(Product,on_delete=models.SET_NULL,blank=True,null=True)
order=models.ForeignKey(Order,on_delete=models.SET_NULL,blank=True,null=True)
quantity= models.IntegerField(default=0,null=True,blank=True)
date_added=models.DateTimeField(auto_now_add=True)
class ShippingAddress(models.Model):
customer = models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True)
order=models.ForeignKey(Order, on_delete=models.SET_NULL,null=True)
address=models.CharField(max_length=200,null=False)
city=models.CharField(max_length=200,null=False)
state = models.CharField(max_length=200,null=False)
zipcode = models.CharField(max_length=200,null=False)
date_added=models.DateTimeField(auto_now_add=True)
| [
"one@users.noreply.github.com"
] | one@users.noreply.github.com |
92983595364b9d6233a635a1ee97a58b611411bc | 81dd26abbbf276313ce3947fa5146970796cc471 | /discum/start/login.py | 20d6d9a04fb3fc4329734f9caee81f620a19543f | [
"MIT"
] | permissive | LYZEN77/Discord-S.C.U.M | f6e6a4a4c44f2bd3cb177411eed68a81ba8384c2 | 40a5c04fb17b74ad3877d792db926800d03bccc9 | refs/heads/master | 2023-02-11T16:48:52.058529 | 2021-01-09T15:59:06 | 2021-01-09T15:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | from ..RESTapiwrap import *
class Login:
'''
Manages HTTP authentication
'''
def __init__(self, s, discordurl, user_email, user_password, log):
self.s = s
self.discord = discordurl
self.__user_email = user_email
self.__user_password = user_password
self.log = log
self.__token = None
def GetXFingerprint(self):
url = self.discord + "experiments"
reqxfinger = Wrapper.sendRequest(self.s, 'get', url, log=self.log)
xfingerprint = json.loads(reqxfinger.content)['fingerprint']
return xfingerprint
def Connect(self):
url = self.discord + "auth/login"
self.xfingerprint = self.GetXFingerprint()
self.s.headers.update({"X-Fingerprint": self.xfingerprint})
body = {"email": self.__user_email, "password": self.__user_password, "undelete": False, "captcha_key": None, "login_source": None, "gift_code_sku_id": None}
response = Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
self.__token = json.loads(response.content)['token']
def GetToken(self):
if self.__token is None:
self.Connect()
return self.__token, self.xfingerprint
| [
"68355905+arandomnewaccount@users.noreply.github.com"
] | 68355905+arandomnewaccount@users.noreply.github.com |
6e295ce0c99120e307b1f78f4a7793a23ab243da | 709bf40a52d3aae34a2563f3cf4fdfa53b9cf1a9 | /backup.py | bb4fb3edb89257ff5021bf11017450d0c8300c38 | [] | no_license | DanielR59/twitchHossBotsDB | 26fc7f915eb757d41b134b2b4d2280fa64831d9d | 5ed8efccc4cfe0615cbda5dcef258434e83db8d8 | refs/heads/main | 2023-08-07T07:41:09.351594 | 2021-10-01T05:42:50 | 2021-10-01T05:42:50 | 405,424,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import sys
import shutil
import sys
def read_to_list(file : str) -> list:
with open(file,'r') as f:
file_list = f.read()
file_list = file_list.split(sep='\n')
file_list = list(filter(None,file_list))
file_list.remove("BANLISTEND")
return file_list
def backup_updater(original_file_list : list, backup_file_list : list, filename : str):
#checamos si es el mismo archivo
if (len(original_file_list) == len(backup_file_list)) and (original_file_list.sort()==backup_file_list.sort()):
print("Los archivos son iguales :D")
return 1
#checamos si el numero de elementos disminuyรณ
if len(original_file_list) < len(backup_file_list):
print("Copiando el backup al archivo original")
shutil.copyfile("./"+ filename+ ".backup","./"+filename)
#Si el numero aumentรณ actualizamos el backup
if len(original_file_list) > len(backup_file_list):
print("Actualizando el backup")
for element in original_file_list:
if element not in backup_file_list:
backup_file_list.append(element)
backup_file_list.append("BANLISTEND")
with open(filename+".backup", 'w') as output:
for row in backup_file_list:
output.write(str(row) + '\n')
#print("Backup actualizado :D")
if __name__ == '__main__':
try:
filename = sys.argv[1]
except:
print("Necesito el archivo perro")
sys.exit(1)
banlist = read_to_list(filename)
backup = read_to_list(filename +".backup")
backup_updater(banlist,backup,filename)
| [
"daniel_pumas_59@hotmail.com"
] | daniel_pumas_59@hotmail.com |
4020c3b3e7e7165b0196c1585615c9b95e9e33fd | 11211916f39b9d98027b64d778e52743d0c519a1 | /L3/tmp/assignments/outline.py | e7bba9d3d9acf2c3e969914fff659f06fe1cd781 | [] | no_license | mantasruigys3000/Group-Task | 87baf1bc2747323c0508f6f32ef733c3f4b50978 | 6790d74ae7fa0fe6b13733efcd75a9f4aca70ab0 | refs/heads/master | 2020-04-23T20:54:09.696659 | 2019-02-22T01:29:53 | 2019-02-22T01:29:53 | 171,454,102 | 0 | 0 | null | 2019-02-19T10:31:09 | 2019-02-19T10:31:08 | null | UTF-8 | Python | false | false | 343 | py | Amet velit etincidunt porro est quaerat etincidunt.
Velit ut velit dolor consectetur est dolor.
Voluptatem quisquam quiquia quisquam sed ut.
Non voluptatem voluptatem etincidunt.
Username: Marcus
Password: titten
Dolorem velit labore velit amet ipsum ipsum adipisci.
Quaerat labore est dolore quaerat aliquam.
Amet sit consectetur labore sed.
| [
"mantasruigys101@gmail.com"
] | mantasruigys101@gmail.com |
3a471239801b8078837935c47ac052742afca2fb | 0e61a484a3a4fc61fe2b660e25fad5744232773b | /avx2-hps4096821/bitpermutations/applications/squaring_mod_GF2N.py | 993a88bdda647d86ccc7491da1dc512403d8093f | [
"CC0-1.0"
] | permissive | OussamaDanba/ntru | 3278dae5bd18ddc9d93acb9eb4221bfec3e7ca06 | da413076b3b0fb377c3174c331462c3293193580 | refs/heads/master | 2020-04-24T08:17:40.665914 | 2019-08-31T17:11:35 | 2019-08-31T17:11:35 | 171,826,066 | 0 | 0 | CC0-1.0 | 2019-02-21T07:55:03 | 2019-02-21T07:55:01 | Python | UTF-8 | Python | false | false | 11,740 | py | from bitpermutations.data import (ONE, ZERO,
Register, Mask, IndicesMask, MaskRegister,
AllocationError)
import bitpermutations.instructions as x86
from bitpermutations.printing import print_memfunc
from bitpermutations.utils import reg_to_memfunc, split_in_size_n
import argparse
import functools
from collections import OrderedDict
def gen_sequence(e, N):
def interleave(seq):
if len(seq) % 2 == 0:
return [x for t in zip(seq[:len(seq) // 2],
seq[len(seq) // 2:]) for x in t]
else:
return ([x for t in zip(seq[:len(seq) // 2],
seq[len(seq) // 2 + 1:]) for x in t] +
[seq[len(seq) // 2]])
seq = list(range(N))
for i in range(e):
seq = interleave(seq)
return seq
def registers_to_sequence(registers):
result = sum((x.value for x in registers), [])
while result[-1] is ZERO:
result.pop()
if not result:
break
return result
def square_821_patience(out_data, in_data, n, callee_saved=0):
x = list(range(821)) + 203*[ZERO]
regs = split_in_size_n(x, 64)
seq = gen_sequence(n, 821) + 203*[ZERO]
seq_r = split_in_size_n(seq, 64)
moved = [False] * len(seq_r)
r = Register(64)
t1 = Register(64)
for i in range(callee_saved):
x86.push_callee_saved(64)
maskcache = OrderedDict()
def mask_to_register(mask):
mask = Mask.as_immediate(mask)
if mask in maskcache:
maskcache.move_to_end(mask)
return maskcache[mask]
try:
maskreg = MaskRegister(64, mask)
except AllocationError:
_, maskreg = maskcache.popitem(False)
x86.mov(maskreg, mask)
maskcache[mask] = maskreg
return maskreg
for j, inreg in enumerate(regs):
x86.mov(r, in_data[j])
for i, seqreg in enumerate(seq_r):
piledict = {}
for rotation in range(64):
ror_seqreg = seqreg[rotation:] + seqreg[:rotation]
piles = []
overlap = [x for x in ror_seqreg if x in inreg and x != ZERO]
for x in overlap:
for pile in piles:
try:
if pile[-1] <= x:
pile.append(x)
break
except IndexError: # pile is empty
pass
else: # doesn't fit on any existing pile: start a new pile
piles.append([x])
piledict[rotation] = piles
min_pile_key = min(piledict, key=lambda x: len(piledict.get(x)))
if len(piledict[0]) == len(piledict[min_pile_key]):
min_pile_key = 0
if min_pile_key > 0:
ror_seqreg = seqreg[min_pile_key:] + seqreg[:min_pile_key]
else:
ror_seqreg = seqreg
for pile in piledict[min_pile_key]:
emask = [ZERO] * 64
for bit in pile:
emask[inreg.index(bit)] = ONE
dmask = [ZERO] * 64
for bit in pile:
dmask[ror_seqreg.index(bit)] = ONE
# For consecutive bits, we do not even need pext/pdep
if (Mask.consec(dmask) and Mask.consec(emask) and
(Mask.degree(emask) < 32 or Mask.degree(dmask) < 32)):
delta = (Mask.degree(dmask) - Mask.degree(emask)) % 64
x86.mov(t1, r)
if Mask.degree(emask) < 32:
x86.iand(t1, Mask.as_immediate(emask))
x86.rol(t1, delta + min_pile_key)
min_pile_key = 0 # to avoid two rols
else:
x86.rol(t1, delta)
x86.iand(t1, Mask.as_immediate(dmask))
else:
# if we can extract using AND instead..
if Mask.consec(emask, True) and Mask.degree(emask) < 32:
x86.mov(t1, r)
x86.iand(t1, Mask.as_immediate(emask))
else:
x86.pext(t1, r, mask_to_register(emask))
x86.pdep(t1, t1, mask_to_register(dmask))
if min_pile_key > 0:
x86.rol(t1, min_pile_key)
if moved[i]: # stored per i, as it's not the outer loop
x86.xor(out_data[i], t1)
else:
x86.mov(out_data[i], t1)
moved[i] = True
x86.movq(out_data[13], 0) # to fill up all 1024 bits
x86.movq(out_data[14], 0) # to fill up all 1024 bits
x86.movq(out_data[15], 0) # to fill up all 1024 bits
for mask in maskcache.values():
mask.free()
for i in range(callee_saved):
x86.pop_callee_saved(64)
def square_821_shufbytes(out_data, in_data, n):
    """Emit AVX2 code computing `n` repeated squarings of an 821-bit value.

    Squaring here is a fixed bit permutation; this variant realizes it with
    vpshufb byte shuffles (plus small rotations) rather than pext/pdep.
    `in_data`/`out_data` are the memory operands for the 1024-bit
    (821 + 203 zero-padding) input and output.
    """
    r = Register()
    out = [Register() for _ in range(4)]  # four 256-bit result accumulators
    moved = [False] * 4                   # whether out[k] holds data yet
    t1 = Register()
    t2 = Register()
    t3 = Register()
    t4 = Register()
    t5 = Register()
    # target bit order after n squarings, zero-padded to 1024 bits
    seq = gen_sequence(n, 821) + 203*[ZERO]
    seq_regvalues = split_in_size_n(seq, 256)  # one slice per output register
    for in_data_fragment in in_data:
        x86.vmovdqa(r, in_data_fragment)
        shift_in = shifted = r
        offset = 0  # rotation amount already baked into shift_in
        for delta in range(8):  # 8 possible rotations may be necessary
            rol_meta = None
            if delta > 0:
                # if we've made the previous rotation persistent
                if shift_in is shifted:
                    shifted = t4 if shifted is t3 else t3
                d_nett = delta - offset
                # remember where this rotation was emitted so it can be
                # stripped again if none of its bits end up being used
                rol_meta = len(x86.INSTRUCTIONS), str(shifted), str(t1)
                x86.macro_v256rol(shifted, shift_in, d_nett, t1, t2)
                # bits that only line up because of this rotation
                rotated = [b for d in range(d_nett) for b in shifted[d::64]]
            # vpshufb cannot cross over xmm lanes
            for swap_xmms in [False, True]:
                if swap_xmms:
                    swapped = t5
                    x86.vpermq(swapped, shifted, '01001110')
                else:
                    swapped = shifted
                r_bytes = split_in_size_n(swapped, 8)
                while True:  # could be necessary to extract twice from same r
                    bitmask = [[] for _ in range(len(seq_regvalues))]
                    shufmask = [None] * 32  # vpshufb byte indices per lane
                    for k, seq_value in enumerate(seq_regvalues):
                        s_bytes = split_in_size_n(seq_value, 8)
                        s_xmms = split_in_size_n(s_bytes, 16)
                        r_xmms = split_in_size_n(r_bytes, 16)
                        for i, (s128, r128) in enumerate(zip(s_xmms, r_xmms)):
                            for l, s_byte in enumerate(s128):
                                for m, r_byte in enumerate(r128):
                                    # if this byte is already taken;
                                    if (shufmask[i*16 + l] is not None and
                                            shufmask[i*16 + l] != m):
                                        continue
                                    # bits of r_byte that land in s_byte
                                    bits = [ONE if x == y and x != ZERO
                                            else ZERO
                                            for x, y in zip(r_byte, s_byte)]
                                    if ONE not in bits:
                                        continue
                                    shufmask[i*16 + l] = m
                                    bitmask[k] += bits
                                    break
                                else:  # no source byte matched this target byte
                                    bitmask[k] += [ZERO] * 8
                                    continue
                                # mark the bits consumed above as handled
                                for m, (x, y) in enumerate(zip(bits, s_byte)):
                                    if x == ONE:
                                        seq_regvalues[k][i*128+l*8 + m] = None
                                s_bytes = split_in_size_n(seq_regvalues[k], 8)
                    if all(x is None for x in shufmask):
                        break
                    x86.vpshufb(t2, swapped, IndicesMask(shufmask))
                    for k, seq_value in enumerate(seq_regvalues):
                        if ONE not in bitmask[k]:
                            continue
                        if not moved[k]:
                            x86.vpand(out[k], t2, Mask(bitmask[k]))
                            moved[k] = True
                        else:
                            x86.vpand(t1, t2, Mask(bitmask[k]))
                            x86.vpxor(out[k], out[k], t1)
                        # check if we used any of the rotated bits
                        for maskbit, bit in zip(bitmask[k], t2):
                            if delta > 0 and bit in rotated and maskbit is ONE:
                                rol_meta = None
            # TODO this is an ugly hack that should be abstracted
            if rol_meta is not None:
                # rotation turned out to be unused: delete it from the stream
                i, dest, temp = rol_meta
                del x86.INSTRUCTIONS[i]  # delete srlq
                x86.INSTRUCTIONS[i] = x86.INSTRUCTIONS[i].replace(temp, dest)
                del x86.INSTRUCTIONS[i+1]  # delete permq
                del x86.INSTRUCTIONS[i+1]  # delete xor
            else:
                # if we're keeping the rotation, make it persistent so that the
                # next rotation is smaller (and thus more likely ignorable)
                shift_in = shifted
                offset = delta
    for m, r in zip(out_data, out):
        x86.vmovdqa(m, r)
if __name__ == '__main__':
    # Command-line front-end: choose a squaring code generator and print
    # the resulting assembly routine.
    parser = argparse.ArgumentParser(description='Output squaring routines.')
    parser.add_argument('no_of_squarings', type=int,
                        help='the number of repeated squarings')
    parser.add_argument('--callee', type=int, dest='callee', default=0,
                        help='the number of callee-saved registers to save')
    parser.add_argument('--patience', dest='patience', action='store_true',
                        help='always use the patience-sort method')
    parser.add_argument('--shufbytes', dest='shufbytes', action='store_true',
                        help='always use the shufbytes method')
    parser.add_argument('--raw-name', dest='raw_name', action='store_true',
                        help='use minimal function name (square_N_821)')
    parser.set_defaults(patience=False)
    args = parser.parse_args()
    if args.shufbytes:
        f = functools.partial(square_821_shufbytes, n=args.no_of_squarings)
        # print_memfunc uses f.__name__ as the emitted symbol name
        if args.raw_name:
            f.__name__ = "square_{}_821".format(args.no_of_squarings)
        else:
            f.__name__ = "square_{}_821_shufbytes".format(args.no_of_squarings)
        print_memfunc(f, 4, 4, initialize=True)
    elif args.patience:
        f = functools.partial(square_821_patience,
                              n=args.no_of_squarings, callee_saved=args.callee)
        if args.raw_name:
            f.__name__ = "square_{}_821".format(args.no_of_squarings)
        else:
            f.__name__ = "square_{}_821_patience".format(args.no_of_squarings)
        print_memfunc(f, 16, 16, per_reg=64)
    elif args.no_of_squarings in permutations:
        # fall back to a pre-generated dedicated permutation, if one exists
        f = permutations[args.no_of_squarings]
        print_memfunc(f, 4, 4)
    else:
        raise NotImplementedError(
            "There is no dedicated implementation for {} squarings. "
            "Please specify either --shufbytes or --patience."
            .format(args.no_of_squarings)
        )
| [
"jschanck@uwaterloo.ca"
] | jschanck@uwaterloo.ca |
d6252c8443b948c869fb4eba3936535398536b9d | 079383fd8753b9900fe9db5d74de5c883f15ccf1 | /posts/migrations/0003_auto_20191210_2116.py | 013483f4bae29cec1b780e3db2e376dce0133ade | [] | no_license | amreges/Blog | 497a6ea9cc9d9472d21c97c726606e36ed9f1c4f | 386d852f0c800a7d83bfa1214f7e55dcfa83b84a | refs/heads/master | 2021-09-28T14:19:07.170381 | 2019-12-11T00:22:14 | 2019-12-11T00:22:14 | 227,240,535 | 0 | 0 | null | 2021-09-22T18:06:35 | 2019-12-11T00:18:35 | Python | UTF-8 | Python | false | false | 365 | py | # Generated by Django 3.0 on 2019-12-11 00:16
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Pedido.Pagamento to the lowercase, PEP 8-style 'pagamento'."""

    dependencies = [
        ('posts', '0002_auto_20191210_1927'),
    ]

    operations = [
        migrations.RenameField(
            model_name='pedido',
            old_name='Pagamento',
            new_name='pagamento',
        ),
    ]
| [
"reges.amanda1@gmail.com"
] | reges.amanda1@gmail.com |
0c3fe24ba0db11201013ae26551e6603037ee311 | 777070dee16cf88afe691c02e6272add54466f78 | /vagrant/flask_ajax2/app.py | 8ca05c470dc88d15fa9bda672bbd666752f1082d | [] | no_license | Swojak-A/Flask_Test_apps | 97b62f8aa613e085fe3216b89d64f92a2c094d48 | 3fd4c0480902abdee144a7233f303fcc37f10dcc | refs/heads/master | 2022-10-22T09:03:11.584528 | 2018-07-26T14:10:06 | 2018-07-26T14:10:06 | 141,889,349 | 1 | 1 | null | 2022-10-11T22:28:14 | 2018-07-22T11:18:24 | Python | UTF-8 | Python | false | false | 3,021 | py | from flask import Flask, render_template, request, jsonify
from flask_sqlalchemy import SQLAlchemy
class ConfigClass(object):
    """Flask settings (development only: hard-coded key, file-based SQLite)."""
    SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
    SQLALCHEMY_DATABASE_URI = 'sqlite:///fungus.db'  # File-based SQL database
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # Avoids SQLAlchemy warning
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Fungus(db.Model):
    """A fungus record: common name, latin name, type, and edibility flag."""
    # FIX: was `_tablename__`, a plain class attribute that SQLAlchemy
    # ignores; the dunder spelling actually pins the table name.  The
    # resulting name is unchanged ("fungus" was already the derived default).
    __tablename__ = "fungus"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(25), nullable=True)   # common name, optional
    lat_name = db.Column(db.String(25))              # latin (scientific) name
    type = db.Column(db.String(25))
    edible = db.Column(db.Boolean)                   # True = edible, False = poisonous
@app.route("/")
def hello():
    """Serve the index template at the site root."""
    return render_template("index.html")
@app.route("/search_results", methods=['POST'])
def search_results():
    """Handle the AJAX search form and render the matching fungi.

    Expects form fields 'column' (which attribute to search; may be empty)
    and 'input' (the search term).

    BUG FIX: the original left `fungi` unassigned — raising NameError at
    render time — whenever a column was selected but the term matched no
    recognised keyword (e.g. column 'edible' with input 'maybe').  Every
    path now falls back to an empty result list.  Debug prints and dead
    commented-out code were removed.
    """
    column = request.form['column'].lower()
    user_input = request.form['input'].lower()
    fungi = []  # safe default: unmatched branches render "no results"

    if user_input and not column:
        # No column chosen: try edibility keywords first, then substring
        # match across all text columns.
        if user_input in ["edible"]:
            fungi = Fungus.query.filter_by(edible=True).all()
        elif user_input in ["poisonous", "poisonus", "poison"]:
            fungi = Fungus.query.filter_by(edible=False).all()
        else:
            fungi = Fungus.query.filter((Fungus.name.like('%{}%'.format(user_input))
                                         | Fungus.type.like('%{}%'.format(user_input))
                                         | Fungus.lat_name.like('%{}%'.format(user_input)))).all()
    elif user_input and (column in ["name"]):
        fungi = Fungus.query.filter(Fungus.name.like('%{}%'.format(user_input))).all()
    elif user_input and (column in ["latin name", "lat name", "lat_name"]):
        fungi = Fungus.query.filter(Fungus.lat_name.like('%{}%'.format(user_input))).all()
    elif user_input and (column in ["type"]):
        fungi = Fungus.query.filter(Fungus.type.like('%{}%'.format(user_input))).all()
    elif user_input and (column in ["edible"]):
        if user_input in ["true", "yes", "edible"]:
            fungi = Fungus.query.filter_by(edible=True).all()
        elif user_input in ["false", "no", "poisonous"]:
            fungi = Fungus.query.filter_by(edible=False).all()
    elif user_input and (column in ["poisonous"]):
        if user_input in ["true", "yes", "poisonous"]:
            fungi = Fungus.query.filter_by(edible=False).all()
        elif user_input in ["false", "no", "edible"]:
            fungi = Fungus.query.filter_by(edible=True).all()
    else:
        # no search term at all: show everything
        fungi = Fungus.query.all()
    return render_template("search-response.html", items=fungi)
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000", debug=True) | [
"swojak.a@gmail.com"
] | swojak.a@gmail.com |
aa28f1b3a188e5b90f854c9b413ac02404822933 | 2b410d4f7eb48427cb0205295f6e8a65f4c2562f | /core/schema.py | d6858eaab466f5cd5c76fac74d601ff7fd95f0f6 | [] | no_license | eavbinary/otus_pyweb_site | 27c3218dd4c0fdf9c235daca19ea9b124b7962a8 | 982cbcf01d5aed114002bce7783c3495e3f9bf3c | refs/heads/master | 2023-07-14T05:59:31.018553 | 2021-08-31T13:17:21 | 2021-08-31T13:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | import graphene
from django.contrib.auth.models import User
from graphene_django import DjangoObjectType
from .models import People, Course, StudentGroup, PeopleInGroup, Schedule
class UserType(DjangoObjectType):
    """GraphQL type exposing a subset of the Django auth User fields."""
    class Meta:
        model = User
        fields = ('id', 'username', 'first_name', 'last_name', 'email',)


class PeopleType(DjangoObjectType):
    """GraphQL type for People (a User plus an is_teacher flag)."""
    class Meta:
        model = People
        fields = ('id', 'user', 'is_teacher',)


class StudentGroupType(DjangoObjectType):
    """GraphQL type for a student group on a course."""
    class Meta:
        model = StudentGroup
        fields = ('id', 'name', 'course', 'start_date')


class PeopleInGroupType(DjangoObjectType):
    """GraphQL type for a (group, student) membership row."""
    class Meta:
        model = PeopleInGroup
        fields = ('id', 'group', 'student')


class CourseType(DjangoObjectType):
    """GraphQL type for a course."""
    class Meta:
        model = Course
        fields = ('id', 'name', 'description')


class ScheduleType(DjangoObjectType):
    """GraphQL type for a scheduled class (date, teacher, group)."""
    class Meta:
        model = Schedule
        fields = ('class_date', 'teacher', 'group')
class Query(graphene.ObjectType):
    """Root GraphQL query: list endpoints for each model, plus one filter."""
    user = graphene.List(UserType)
    people = graphene.List(PeopleType)
    teacher = graphene.List(PeopleType)   # People with is_teacher=True
    student = graphene.List(PeopleType)   # People with is_teacher=False
    course = graphene.List(CourseType)
    student_group = graphene.List(StudentGroupType)
    # memberships of one specific group, selected by its id
    people_in_group_filter = graphene.List(PeopleInGroupType, group_id=graphene.String(required=True))
    people_in_group = graphene.List(PeopleInGroupType)
    schedule = graphene.List(ScheduleType)

    def resolve_people(self, info):
        result = People.objects.all()
        return result

    def resolve_teacher(self, info):
        result = People.objects.filter(is_teacher=True)
        return result

    def resolve_student(self, info):
        result = People.objects.filter(is_teacher=False)
        return result

    def resolve_student_group(self, info):
        result = StudentGroup.objects.all()
        return result

    def resolve_course(self, info):
        result = Course.objects.all()
        return result

    def resolve_user(self, info):
        result = User.objects.all()
        return result

    def resolve_people_in_group(self, info):
        result = PeopleInGroup.objects.all()
        return result

    def resolve_people_in_group_filter(self, info, group_id):
        result = PeopleInGroup.objects.filter(group_id=group_id)
        return result

    def resolve_schedule(self, info):
        result = Schedule.objects.all()
        return result
schema = graphene.Schema(query=Query)
| [
"eav.binary@ya.ru"
] | eav.binary@ya.ru |
ea5b92d695adaa4957a531b8fcb3cf4771a1f942 | aeda2662a40c97bed9512ef5d6e958c430287bce | /trying/website/forms.py | 46714dba53b2aee078c1eb004bc2418afeed311b | [] | no_license | generationzcode/sns | cfe60319132e29561cf58ea018852c81c8d23a59 | a0dc0942f8393502ff4dbba89af8501e2118d14f | refs/heads/master | 2020-09-21T05:34:17.616675 | 2019-11-28T17:02:46 | 2019-11-28T17:02:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django import forms
from .models import Roast
from django.utils import timezone
class RoastForm(forms.Form):
    """Single-field form for submitting a roast (max 1000 characters)."""
    roast = forms.CharField(label='roast', max_length=1000)
"arjunarastogi@gmail.com"
] | arjunarastogi@gmail.com |
611dd71d44e7e6ece714b4e93db36c19f786ff1c | d6d90091f5a3434deae0f834794649d8b0f77071 | /shuffleNumber.py | 02813622ffd8702ce2964cbea8e2c459876e96ca | [] | no_license | rhyep/Automate-the-Boring-Stuff-with-Python | e1a7be3e2487bf384669abc7bd47df032982546a | 91c7583f40363c37bb768cd02cae859defaeab5c | refs/heads/master | 2020-03-28T12:23:14.693695 | 2018-09-14T05:53:59 | 2018-09-14T05:53:59 | 148,292,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # This is a guess the number game.
import random
print('Hello. What is your name?')
name = input()
print('Well, ' + name + ', I am thinking of a number between 1 and 20.')
secretNumber = random.randint(1, 58)
secretNumber1 = random.randint(1, 58)
secretNumber2 = random.randint(1, 58)
secretNumber3 = random.randint(1, 58)
secretNumber4 = random.randint(1, 58)
secretNumber5 = random.randint(1, 58)
print(secretNumber)
print(secretNumber1)
print(secretNumber2)
print(secretNumber3)
print(secretNumber4)
print(secretNumber5)
| [
"rhym.sugar@gmail.com"
] | rhym.sugar@gmail.com |
40b606a75f2a3ea6ee7f290d627b798e157e9894 | 2b31366107bd56244564c196c852f39ff024e278 | /example.py | 095c2818d3c45494ec74d905b086705256aa66a9 | [
"BSD-3-Clause"
] | permissive | toastdriven/pubsubittyhub | 444a7b0d5b26abf0a1cd820d3d57a1d92346a4c4 | 8d3a0b135b0a284f52234c06cfc586cc5e6f5c6d | refs/heads/master | 2020-05-05T01:39:51.073435 | 2009-12-17T09:22:31 | 2009-12-17T09:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import urllib2
import sys
from urllib import urlencode
try:
import json
except ImportError:
import simplejson as json
print 'Testing index...'
content = urllib2.urlopen('http://localhost:8080/').read()
print 'Creating a channel...'
content = urllib2.urlopen('http://localhost:8080/channels', data={}).read()
print content
channel_id = json.loads(content)['id']
print "Adding subscriber to channel '%s'..." % channel_id
body = urlencode({'data': json.dumps({'channel': channel_id, 'url': sys.argv[1]})})
content = urllib2.urlopen('http://localhost:8080/subscribers', data=body).read()
print content
print "Posting message to channel '%s'..." % channel_id
body = urlencode({'data': json.dumps({'channel': channel_id, 'message': 'O HAI'})})
content = urllib2.urlopen('http://localhost:8080/messages', data=body).read()
print content
| [
"daniel@toastdriven.com"
] | daniel@toastdriven.com |
60796983cfa596e187827dbca6f9c21de957b2ab | eb06bf6726829dba01b2f5f1bb10727166865b4e | /evaluation.py | a0499df819c14bfba15a3b7e316779dc301222e9 | [] | no_license | kaveri-nadhamuni/xray-pytorch | 38d0a7e24c2041e690ef964ddea89543b06d6855 | 29d1d127e9acc53a90a988901ddb0722a5311a2d | refs/heads/master | 2020-04-20T17:05:45.226372 | 2020-04-19T21:44:13 | 2020-04-19T21:44:13 | 168,978,876 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,805 | py | import numpy
import torch
from datasets import load_dataset
from tools import encode_sentences, encode_images
def evalrank(model, data, split='dev'):
    """
    Evaluate a trained model on either dev or test
    """
    print 'Loading dataset'
    # dev split is the second element of the loaded tuple; test uses its own flag
    if split == 'dev':
        X = load_dataset(data)[1]
    else:
        X = load_dataset(data, load_test=True)

    print 'Computing results...'
    # embed captions (X[0]) and images (X[1]) into the joint space
    ls = encode_sentences(model, X[0])
    lim = encode_images(model, X[1])

    # report Recall@1/5/10 and median rank in both retrieval directions
    (r1, r5, r10, medr) = i2t(lim, ls)
    print "Image to text: %.1f, %.1f, %.1f, %.1f" % (r1, r5, r10, medr)
    (r1i, r5i, r10i, medri) = t2i(lim, ls)
    print "Text to image: %.1f, %.1f, %.1f, %.1f" % (r1i, r5i, r10i, medri)
def i2t(images, captions, npts=None):
    """
    Images->Text (Image Annotation)
    Images: (5N, K) matrix of images
    Captions: (5N, K) matrix of captions
    Returns (R@1, R@5, R@10, median rank).
    """
    if npts is None:
        # FIX: floor division — plain `/` yields a float under Python 3,
        # which breaks `range(npts)` below; `//` is identical in Python 2.
        npts = images.size()[0] // 5
    ranks = numpy.zeros(npts)
    for index in range(npts):
        # Get query image (rows come in groups of 5; take the group's first)
        im = images[5 * index].unsqueeze(0)

        # Compute similarity scores against every caption
        d = torch.mm(im, captions.t())
        d_sorted, inds = torch.sort(d, descending=True)
        inds = inds.data.squeeze(0).cpu().numpy()

        # Score: best (lowest) rank among this image's 5 captions
        rank = 1e20
        for i in range(5 * index, 5 * index + 5, 1):
            tmp = numpy.where(inds == i)[0][0]
            if tmp < rank:
                rank = tmp
        ranks[index] = rank

    # Compute metrics (ranks are 0-based, so rank < 1 means top-1)
    r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
    medr = numpy.floor(numpy.median(ranks)) + 1
    return (r1, r5, r10, medr)
def t2i(images, captions, npts=None, data='f8k'):
    """
    Text->Images (Image Search)
    Images: (5N, K) matrix of images
    Captions: (5N, K) matrix of captions
    `data` is unused; kept for call-site compatibility.
    Returns (R@1, R@5, R@10, median rank).
    """
    if npts is None:
        # FIX: floor division so npts stays an int under Python 3 (see i2t)
        npts = images.size()[0] // 5
    # one representative image per group of 5 duplicated rows
    ims = torch.cat([images[i].unsqueeze(0) for i in range(0, len(images), 5)])

    ranks = numpy.zeros(5 * npts)
    for index in range(npts):
        # Get the 5 query captions belonging to image `index`
        queries = captions[5 * index: 5 * index + 5]

        # Compute scores of each caption against all images
        d = torch.mm(queries, ims.t())
        for i in range(d.size()[0]):
            d_sorted, inds = torch.sort(d[i], descending=True)
            inds = inds.data.squeeze(0).cpu().numpy()
            ranks[5 * index + i] = numpy.where(inds == index)[0][0]

    # Compute metrics (ranks are 0-based, so rank < 1 means top-1)
    r1 = 100.0 * len(numpy.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(numpy.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(numpy.where(ranks < 10)[0]) / len(ranks)
    medr = numpy.floor(numpy.median(ranks)) + 1
    return (r1, r5, r10, medr)
| [
"kaveri@mit.edu"
] | kaveri@mit.edu |
ee66c9dd4a0d630c6ecb661c22a3acf967691125 | 58ce8a45d03ec24b89e7502f149bef42d77ad777 | /tests/test_models_artist.py | 96a9afac325c6d5076dbf3cec399a9ae628b3fc7 | [
"MIT"
] | permissive | AndyTempel/spotify.py | db9ba8523d6dbd9bf233f963ea04fac4bf555d5e | d5a18ee59ddffd9026b36f510b45b4cc391ac557 | refs/heads/master | 2022-12-12T14:46:41.780249 | 2020-08-28T23:35:09 | 2020-08-28T23:35:09 | 291,162,036 | 0 | 0 | MIT | 2020-08-28T23:02:23 | 2020-08-28T23:02:22 | null | UTF-8 | Python | false | false | 618 | py | import asyncio
import unittest
from types import ModuleType
from common import *
class TestArtist(unittest.TestCase):
    """Smoke-tests the artist endpoints against the Spotify API."""

    @async_with_client(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET)
    async def test_artist(self, *, client):
        # For every known artist URI, fetch the artist and exercise each of
        # its album/track/related endpoints via the async_chain helper;
        # any raised exception fails the test.
        for artist_uri in TEST_ARTISTS:
            artist = await client.get_artist(artist_uri)
            await async_chain([
                artist.get_albums(),
                artist.get_all_albums(),
                artist.total_albums(),
                artist.top_tracks(),
                artist.related_artists()
            ])
if __name__ == '__main__':
unittest.main()
| [
"m3nta1@yahoo.com"
] | m3nta1@yahoo.com |
07bd5abf6da70de6c220abe83c4631da983520bc | cf8c8118c16e7f38a190bc4a96b28d8c4b89ff58 | /mmdet3d/core/bbox/assigners/__init__.py | 44f51fc0f336ef0b15d9090d3b6017873f6aa83a | [
"Apache-2.0"
] | permissive | encore-zhou/mmdetection3d | a7a5e21c0ec8f99843301b89b1e9b079f0a983c5 | c3d7f97baecd1beff1e9757d51523778c38f118b | refs/heads/master | 2023-03-21T17:03:22.723259 | 2020-09-22T07:08:45 | 2020-09-22T07:08:45 | 283,723,083 | 1 | 1 | Apache-2.0 | 2020-09-22T07:08:46 | 2020-07-30T09:07:38 | Python | UTF-8 | Python | false | false | 133 | py | from mmdet.core.bbox import AssignResult, BaseAssigner, MaxIoUAssigner
__all__ = ['BaseAssigner', 'MaxIoUAssigner', 'AssignResult']
| [
"wayne.zw@outlook.com"
] | wayne.zw@outlook.com |
f5b5532ccdc4dc030b0b591b8d0a3875537e6a09 | 750f3e0bbb91e7cc850d5d901d27f6e298bc86f6 | /BattlegroundPolls.py | 6157363d244e2cc15a946b12490cf4849a8050f3 | [] | no_license | vandanaprabhu2000/PresidentialPolls | 6b20a3ec44f02245fcf232465e76bdc2d3f2d8dc | f057395399b69669ea3c2bc08cdbf95c7b297b6e | refs/heads/master | 2022-12-08T21:11:42.712328 | 2020-09-02T02:15:09 | 2020-09-02T02:15:09 | 291,191,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from rcp import get_polls
# Battleground states to query for Trump polling data.
battleground_states = [
    "Wisconsin",
    "Florida",
    "Michigan",
    "Pennsylvania",
    "North Carolina",
    "Arizona",
]

# Print every available poll for each battleground state.
for state in battleground_states:
    polls = get_polls(candidate="Trump", state=state)
    for poll in polls:
        print(poll)
"vandanaprabhu2000@gmail.com"
] | vandanaprabhu2000@gmail.com |
e4dd882dee464e497e99ac05ba9d79af0ff95fbd | ff999ce591e1f4f06964bb2cf9f159a07468c9fa | /project/app/models.py | d10a0a3a4550a29c185c69348410b831bde3de9e | [] | no_license | Ganapathy7204/corona-django | f95bfee3a6a373a47206d92b23a616d510161899 | cdff58c5573603185fc6f9e0edee5ac55c2c1461 | refs/heads/master | 2022-12-05T11:45:51.381384 | 2020-08-20T10:13:39 | 2020-08-20T10:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | from django.db import models
# Create your models here.
class Contact(models.Model):
    """A contact-form submission."""
    name = models.CharField(max_length=30)
    email = models.EmailField()
    # NOTE(review): IntegerField drops leading zeros and caps length;
    # a CharField is usually safer for phone numbers — confirm intent.
    phone = models.IntegerField()
    desc = models.TextField()  # message body

    def __str__(self):
        return self.name
class Friendspost(models.Model):
    """A blog post submitted by a friend/guest author."""
    sno = models.AutoField(primary_key=True)  # explicit serial primary key
    title = models.CharField(max_length=50)
    content = models.TextField()
    author = models.CharField(max_length=50)
    img = models.ImageField(upload_to='friends', blank=True, null=True)
    timeStamp = models.DateTimeField(auto_now_add=True, blank=True)  # set once at creation

    def __str__(self):
        return self.author
class Adminspost(models.Model):
    """A blog post created by an admin (uses Django's implicit auto id)."""
    title = models.CharField(max_length=50)
    content = models.TextField()
    author = models.CharField(max_length=50)
    # NOTE(review): shares the 'friends' upload directory with Friendspost —
    # confirm this is intended.
    img = models.ImageField(upload_to='friends', blank=True, null=True)
    timeStamp = models.DateTimeField(auto_now_add=True, blank=True)  # set once at creation

    def __str__(self):
        return self.author
# pip install Pillow
| [
"aneesurrehman423@gmail.com"
] | aneesurrehman423@gmail.com |
b2763a3a3c9318b24e36592eed8791533faf27d4 | 4786216d2a8e9221cc3624366152f47ae513e5c7 | /ๅไบฌๆฟๅฑไบคๆ/00.py | 3738ce39b9fbe67fc5d1c47c31d9d290e2cc619a | [] | no_license | injuredangel/- | b6a2502ee026320b96947d41c223edebe3ec65cc | 7988c6aa5e825504ff59b006c37d4383b3bb1da8 | refs/heads/master | 2020-05-25T02:21:15.654253 | 2019-05-20T06:27:42 | 2019-05-20T06:27:42 | 187,575,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py |
import requests
from bs4 import BeautifulSoup
# Target: Beijing housing commission real-estate transaction listing page.
url = 'http://www.bjjs.gov.cn/bjjs/fwgl/fdcjy/fwjy/index.shtml'

# Browser-like request headers (including a captured session cookie) so
# the site serves the regular HTML page.
headers = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding':'gzip, deflate',
    'Accept-Language':'zh-CN,zh;q=0.9',
    'Cache-Control':'max-age=0',
    'Connection':'keep-alive',
    'Cookie':'wdcid=55e47ea030f84764; _gscu_1677760547=4060476218oivg24; _gscbrs_1677760547=1; Hm_lvt_9ac0f18d7ef56c69aaf41ca783fcb10c=1540604763,1540621692; wdlast=1540624935; _gscs_1677760547=t406249357bbz3224|pv:1; Hm_lpvt_9ac0f18d7ef56c69aaf41ca783fcb10c=1540624935',
    'Host':'www.bjjs.gov.cn',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
}

# Fetch the page and dump the raw HTML for inspection.
response = requests.get(url=url, headers=headers).text
print(response)
# html_doc = BeautifulSoup(response,'lxml')
| [
"you@example.com"
] | you@example.com |
e2a285f5d90bbec70f34426e66342056d7e21018 | 87fe6ec76a16f1fb4712df0b6a497f0df75bd5d1 | /shell_test/test.py | 4e31a078b82ed4e85452a1cdd85dfcb6c7d29690 | [] | no_license | Angel888/suanfa | 48d4f6f425d0075111517e7ea635a74b79df97a6 | 4a27fdd976268bf4daf8eee447efd754f1e0bb02 | refs/heads/master | 2023-05-09T06:29:44.649587 | 2021-05-29T00:45:33 | 2021-05-29T00:45:33 | 371,849,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import functools
with open('bb', 'r') as f:
data = f.readlines()
print(data)
data = sorted(data, key = functools.cmp_to_key(lambda x,y: 1 if len(x)> len(y) else -1))
print(data)
# todo https://www.cnblogs.com/apeway/p/10764597.html ๆฐ็ปไธญๆชๅบ็ฐ็ๆๅฐๆญฃๆดๆฐ
def smallest_num(num_list):
num_list.sort()
for i in num_list:
tmp=0
if i>1:
res=1
return 1
elif
| [
"wei.liangjie@mintegral.com"
] | wei.liangjie@mintegral.com |
da6cf0b5706d9aff92391ad04e8b0982ed654063 | 819875a388d7caf6795941db8104f4bf72677b90 | /chrome/test/functional/chromoting_basic.py | 5b8a46d3bbbd9714a1c930b48013acd6a76d1bd1 | [
"BSD-3-Clause"
] | permissive | gx1997/chrome-loongson | 07b763eb1d0724bf0d2e0a3c2b0eb274e9a2fb4c | 1cb7e00e627422577e8b7085c2d2892eda8590ae | refs/heads/master | 2020-04-28T02:04:13.872019 | 2012-08-16T10:09:25 | 2012-08-16T10:09:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # Must come before chromoting and pyauto.
import chromoting
import pyauto
class ChromotingBasic(chromoting.ChromotingMixIn, pyauto.PyUITest):
    """Basic tests for Chromoting."""

    def setUp(self):
        """Set up test for Chromoting on both local and remote machines.

        Installs the Chromoting app, launches it, and authenticates
        using the default Chromoting test account.
        """
        super(ChromotingBasic, self).setUp()
        app = self.InstallExtension(self.GetWebappPath())
        self.LaunchApp(app)
        account = self.GetPrivateInfo()['test_chromoting_account']
        self.Authenticate(account['username'], account['password'])

    def testChromoting(self):
        """Verify that we can start and disconnect from a Chromoting session."""
        # local machine acts as host (shares its desktop); remote connects
        host = self
        client = self.remote
        access_code = host.Share()
        self.assertTrue(access_code,
                        msg='Host attempted to share, but it failed. '
                            'No access code was found.')
        self.assertTrue(client.Connect(access_code),
                        msg='The client attempted to connect to the host, '
                            'but the chromoting session did not start.')
        host.CancelShare()
        client.Disconnect()
if __name__ == '__main__':
pyauto_functional.Main()
| [
"loongson@Loong.(none)"
] | loongson@Loong.(none) |
07e55ee3fd2c3c2e3b690cf0132a7e10a918ba60 | 5b3d8f56f4d18dc8809f9f5aa7d2a7089cdbf489 | /.c9/metadata/workspace/Interview/InterviewRQ3.py | 7e7c06281c2528a512f1728fa32de5e30a67c06d | [] | no_license | heyliljill/edpsych-cloned | 89ba1a827ed66651b7387b25bc2c188ff344e8d1 | ba02e4789e390bb6488b11608b994ee5678a4b30 | refs/heads/master | 2020-07-26T00:51:41.004018 | 2019-09-14T17:26:45 | 2019-09-14T17:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | {"filter":false,"title":"InterviewRQ3.py","tooltip":"/Interview/InterviewRQ3.py","ace":{"folds":[],"scrolltop":186,"scrollleft":909,"selection":{"start":{"row":38,"column":26},"end":{"row":38,"column":325},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"26c2c4b2d3dc5419ba398fd9a9d5dc56af9b3cc6","undoManager":{"mark":-1,"position":-1,"stack":[]},"timestamp":1403036614000} | [
"jillyma@gmail.com"
] | jillyma@gmail.com |
1210ed8c74d75308a92987b11c01cc0c7b84f9d4 | 3e17606da0f946a2a5f9f0d2949b1a08c015252b | /Paiza_Prac/SkillChallenge/D-Rank/D-53_ใใชใใฏใชใขใใชใผใ/D-53_TrickOrTreat.py | 5462b419771a59147b08efdfa70b7f78a2f03483 | [] | no_license | nao-j3ster-koha/Py3_Practice | 03a5ba57acdb4df964fcfc6b15afdd2f0d833ef1 | 4f64ddc022449060a67f7b0273c65d8f1ff8c680 | refs/heads/master | 2021-10-09T11:04:03.047530 | 2018-10-16T09:15:59 | 2018-10-16T09:15:59 | 103,068,457 | 0 | 0 | null | 2018-10-16T08:47:04 | 2017-09-10T23:12:42 | Python | UTF-8 | Python | false | false | 182 | py | str = input()
while (int(len(str)) < 1 or int(len(str)) > 20):
str = input()
if ( str == 'chocolate' or str == 'candy'):
rslt = 'Thanks!'
else:
rslt = 'No!'
print(rslt) | [
"sdvr.nao@gmail.com"
] | sdvr.nao@gmail.com |
515985f9c365e4f7a75fbacff4880c8cf4311440 | 84e14fa47a457c67a64d5c6fd6a1e37bd55de6e1 | /mynumpy/mynumpy.py | c55e5649673ffcfcedebf56ca4c34272d9262c88 | [] | no_license | python-2019/python-data-analysis | 0f5d47b9bd3166a84ddab6f4a0ea0e746a9901a0 | bf133979c5e342b5ec109b091d3473eadbf53651 | refs/heads/master | 2020-07-15T23:57:46.783095 | 2019-09-16T11:11:46 | 2019-09-16T11:11:46 | 205,677,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,278 | py | import numpy as np
class mynumpy:
    """A grab-bag of small NumPy practice snippets.

    All methods are stateless demos; intermediate results are bound to
    locals purely for illustration.  FIX: base2/base3 were missing
    @staticmethod, so calling them on an instance raised TypeError
    (the instance was passed as an unexpected first argument); class-level
    calls keep working exactly as before.
    """

    @staticmethod
    def desc():
        # (message intentionally left in the original Chinese)
        print("่ฟๆฏnumpy็ไธไบๆต่ฏ")

    @staticmethod
    def base1():
        # build [0 1 2 3 4 5]
        arr1 = np.arange(6)
        # reshape the 1-D array into a 2x3 2-D array: [[0 1 2] [3 4 5]]
        arr2 = arr1.reshape((2, 3))
        # array-with-scalar arithmetic: [[2 3 4] [5 6 7]]
        reshape_arithmetic = arr2 + 2
        # array-with-array arithmetic: [[0 2 4] [6 8 10]]
        arange_reshape = arr2 + arr2

    @staticmethod
    def base2():
        # 1-D array [0 1 2 3 4 5 6 7 8 9]
        arr = np.arange(10)
        # boolean array [True True True]
        bool_arr = np.full(3, True, dtype=bool)
        # extract elements matching a condition (odd values): [1 3 5 7 9]
        filter_arr = arr[arr % 2 == 1]
        # replace every odd value with -1 without mutating arr
        out = np.where(arr % 2 == 1, -1, arr)
        # reshape the 1-D array into 2 rows of 5
        arr_reshape = arr.reshape(2, 5)
        # horizontally stack two 2x5 arrays
        a = np.arange(10).reshape(2, -1)
        b = np.repeat(1, 10).reshape(2, -1)
        concatenate = np.concatenate([a, b], axis=1)
        # common items of two arrays via intersect1d: [2 4]
        a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
        b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
        d = np.intersect1d(a, b)
        # remove from `a` all items present in `b`: [1 2 3 4]
        a = np.array([1, 2, 3, 4, 5])
        b = np.array([5, 6, 7, 8, 9])
        setdiff_d = np.setdiff1d(a, b)
        # positions where two arrays agree elementwise: (array([1, 3, 5, 7]),)
        a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
        b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
        where = np.where(a == b)
        # boolean/where indexing: elements of `a` greater than 3
        a[np.where(a > 3)]

    @staticmethod
    def base3():
        # reverse a 1-D array with a negative slice step: [8 7 ... 0]
        arr = np.arange(9)
        arr_ = arr[::-1]
        # mean / median / standard deviation of the iris sepal lengths
        # NOTE: performs a network download from the UCI repository
        url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
        iris = np.genfromtxt(url, delimiter=',', dtype='object')
        sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
        mu, med, sd = np.mean(sepallength), np.median(sepallength), np.std(sepallength)
        print(mu, med, sd)
if __name__ == '__main__':
    # demo: 3x4 matrix of random values in [0, 10) floored to whole numbers
    a = np.floor(10 * np.random.random((3, 4)))
    print(a)
| [
"854731306@QQ.com"
] | 854731306@QQ.com |
d1e535da617f09a037448c3df23b3b182bcedd53 | c0578b14ebaef889ffc75551ebcc7e5c80b6069e | /src/811_subdomain_visit_count.py | 87eb66cd63973d2c0ce2d010c3afb1f86145ce1f | [] | no_license | BrianQcq/LeetCode | 88ee122aa2b358c61d6980c159008e8ccac6cc8c | 127ca7d82fa15214da8d5e9fbc461831cdb6b60b | refs/heads/master | 2020-06-10T04:20:33.798787 | 2019-11-12T07:56:58 | 2019-11-12T07:56:58 | 193,580,067 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
class Solution(object):
    """LeetCode 811: count visits for every (sub)domain level."""

    def subdomainVisit(self, cpdomains):
        """Given "count domain" strings, total the visits per suffix domain.

        Each entry like "9001 discuss.leetcode.com" credits 9001 visits to
        discuss.leetcode.com, leetcode.com and com.  Result order follows
        first-seen order of each suffix, as in the original implementation.
        """
        counts = {}
        for entry in cpdomains:
            count_str, domain = entry.split()
            visits = int(count_str)
            labels = domain.split('.')
            # credit every suffix: a.b.c -> a.b.c, b.c, c
            while labels:
                suffix = '.'.join(labels)
                counts[suffix] = counts.get(suffix, 0) + visits
                labels = labels[1:]
        return ['{} {}'.format(total, dom) for dom, total in counts.items()]
| [
"qiuchuanqin@gmail.com"
] | qiuchuanqin@gmail.com |
b934c3888b04e8b3f176a880a569f036ad34e3b6 | 05551338203763bad453a2264c5b6582d725ed3d | /MusicAnalyser/settings.py | 5e2d64f8d5926c6d80a9e58570461a3facf1b76d | [] | no_license | ShivayaDevs/MusicAnalyser | 51e553c138ee5e05b9b9c8ec19e10e5594a6d05d | b86abceebb1c11e938af43747dca4512ecb00ca3 | refs/heads/master | 2021-01-22T18:23:31.125100 | 2017-03-17T19:05:33 | 2017-03-17T19:05:33 | 85,077,721 | 5 | 2 | null | 2017-03-17T06:19:51 | 2017-03-15T13:47:18 | Python | UTF-8 | Python | false | false | 3,133 | py | """
Django settings for MusicAnalyser project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '5g&r-8m5zqmy0o(4v-h741uip@xe3j@8e+mv=-4(qxp76@7fap'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; production requires hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'analyser',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MusicAnalyser.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'MusicAnalyser.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Default development database: SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"verma.yash8@gmail.com"
] | verma.yash8@gmail.com |
a4b161d665baf8d27aecbdb191e60e06308b2f62 | 8c7fba506eb022e627537e6017b97508ca453b65 | /models/dbsetup.py | 7b7c8e89cda78eb02cff7f6496740a112f1c6dcd | [
"MIT"
] | permissive | laminko/wBlog | 4a6851ba159c5cf30461fd08b428647c14622c14 | c2bdecede8bf589eabb57bd080e90d995261aafd | refs/heads/master | 2020-04-06T07:05:22.722787 | 2016-09-15T09:01:34 | 2016-09-15T09:01:34 | 65,677,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,885 | py | from datetime import datetime
SYM_PAGE_BREAKER = " {LMK:PAGE-BREAK} "
SINGLE_SPACE = " "
# Tables
db.define_table('post',
Field('title', 'string'),
Field('body', 'text'),
Field('body_pagebreak',
compute=lambda r: (
r['body'] or "").split(SYM_PAGE_BREAKER)[0]),
Field('body_nobreak',
compute=lambda r: (
r['body'] or "").replace(SYM_PAGE_BREAKER,
SINGLE_SPACE)),
Field('has_pagebreak',
compute=lambda r: SYM_PAGE_BREAKER in (r['body'] or "")),
Field('is_draft', 'boolean', default=False),
Field('total_likes', 'integer', default=0,
readable=False, writable=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False),
Field('tags', 'list:string'))
db.define_table('postcomment',
Field('post', 'reference post',
readable=False, writable=False),
Field('body', 'text', label=T("Comment")),
Field('is_approved', 'boolean', default=False,
readable=False, writable=False),
Field('is_deleted', 'boolean', default=False,
readable=False, writable=False),
Field('reply_to', 'reference postcomment',
readable=False, writable=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('contact',
Field('name', 'string', requires=IS_NOT_EMPTY()),
Field('email', 'string', requires=[
IS_NOT_EMPTY(), IS_EMAIL()]),
Field('description', 'text', requires=IS_NOT_EMPTY()),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False))
db.define_table('bulletin',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('message_body', 'text', requires=IS_NOT_EMPTY()),
Field('message_type', 'string',
default='info',
requires=IS_IN_SET(('success',
'info',
'warning',
'danger',
'special'))),
Field('expires_on', 'datetime', default=None),
Field('is_active', 'boolean', default=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('eventinfo',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('event_detail', 'text', requires=IS_NOT_EMPTY()),
Field('image_url', 'text'),
Field('location_text', 'text'),
Field('location_lat', 'float'),
Field('location_lng', 'float'),
Field('event_start', 'datetime'),
Field('event_end', 'datetime'),
Field('is_active', 'boolean', default=False),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
db.define_table('upload',
Field('title', 'string', requires=IS_NOT_EMPTY()),
Field('the_file', 'upload'),
Field('is_public', 'boolean', default=True,
comment='Public url is like <b>/getobject/(id)</b>.'),
Field('created_on', 'datetime', default=request.now,
readable=False, writable=False),
Field('created_by', 'reference auth_user',
default=auth.user_id,
readable=False, writable=False),
Field('modified_on', 'datetime', update=request.now,
readable=False, writable=False),
Field('modified_by', 'reference auth_user',
update=auth.user_id,
readable=False, writable=False))
# check default root user exists or not.
if db(db.auth_user).count() < 1:
# if not:
# create groups once.
db.auth_group.bulk_insert([
dict(role='Root', description='System user'),
dict(role='Admin', description='Blog admin'),
dict(role='Editor', description='Blog editor'),
dict(role='Moderator', description='Blog moderator'),
dict(role='User', description='Blog reader')
])
# create default root user.
db.auth_user.insert(
**dict(
first_name='System',
last_name='User',
email='root@root.su',
password=db.auth_user.password.validate('root@root.su')[0]
)
)
# set permission for default user.
auth.add_membership(user_id=1, group_id=1)
| [
"="
] | = |
d41c719892923ffb1f64eb2111db4b88f6752e4b | 5564382abc0a9ea1f19a1df063ed95c85fd241d8 | /organdonationwebapp/API/Authenticator.py | 3a24d0358a6e5906ef0a28f98daa3d88ad914e7f | [] | no_license | pratikkapoor90/Organ-Donation-App | 83dade2ee26ea4f7dc34748f785fc968640aa852 | 36a628a2b5406f6795cb37006153131240b29bde | refs/heads/master | 2022-12-17T14:35:24.661770 | 2018-11-06T17:23:06 | 2018-11-06T17:23:06 | 156,418,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py |
import organdonationwebapp.API.UserTypeFactory as factory
class Authenticator(object):
    """Validates a login request by delegating to the user-type factory.

    Expected loginJson keys are 'logintype', 'emailID' and 'password';
    missing keys are stored as None rather than raising.
    """
    def __init__(self,loginJson, logger):
        # Pull out individual fields defensively; the full payload is also
        # kept so it can be forwarded to the factory unchanged.
        self.usertype = loginJson['logintype'] if 'logintype' in loginJson else None
        self.email = loginJson['emailID'] if 'emailID' in loginJson else None
        self.password = loginJson['password'] if 'password' in loginJson else None
        self.json = loginJson
        self.logger = logger
    def validateLogin(self):
        """Create the concrete user object via the factory and call login().

        Returns (valid, url) on success.
        NOTE(review): on any exception this returns a bare None instead of a
        2-tuple, so callers that unpack the result will hit a TypeError --
        confirm whether (None, None) was intended.
        NOTE(review): the error is print()ed even though self.logger is
        available; presumably logging was intended here.
        """
        try:
            objectFactory = factory.UserTypeFactory(self.json,self.logger ,None, self.usertype,)
            inst = objectFactory.createObject()
            valid, url = inst.login()
            return valid, url
        except Exception as err:
            print(err)
            return None
"pratik.kapoor@dal.ca"
] | pratik.kapoor@dal.ca |
360662ab4716ef28923524956a306b1e9ae36d00 | 2f2334477bce67f116a3547683192710761ac21e | /happy_python/happy_config.py | 3629798b6da17f58f6d0a7a19e6ceda5c4781e35 | [
"MIT"
] | permissive | geekcampchina/happy-python | ca00ddce75c3f8b5a50430c824628b5b6d178487 | c2b555fc3c90e91258d1145b99e266f9219f45f3 | refs/heads/master | 2023-08-18T02:02:03.568370 | 2023-08-10T07:09:35 | 2023-08-10T07:09:35 | 198,445,592 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
้
็ฝฎๆไปถ๏ผINI๏ผ่ฝฌๆข็ฑป
"""
import os
from abc import ABCMeta
from dataclasses import dataclass
from happy_python import HappyPyException
@dataclass
class HappyConfigXListNode:
    # One prefix plus every fully-qualified "prefix.key" name seen under it.
    prefix: str
    keys: list[str]


@dataclass
class HappyConfigXList:
    # Extended-list entries recorded for a single INI section.
    section: str
    node: HappyConfigXListNode


class HappyConfigBase(object, metaclass=ABCMeta):
    """Base class for typed configuration holders filled in from INI files.

    Subclasses declare plain attributes in __init__; the parser assigns
    parsed values onto them.  The "xlist" machinery tracks dynamic
    '!prefix.key' options grouped by (section, prefix).
    """
    _section = 'main'
    # NOTE(review): this is a mutable class-level list, so every instance of
    # every subclass shares one xlist store.  It is deliberately NOT moved
    # into __init__: HappyConfigParser._loads iterates the instance __dict__
    # (skipping only '_section'), so an instance-level _xlist would be
    # mistaken for a config field.  Confirm whether sharing is intended.
    _xlist: list[HappyConfigXList] = []

    def __init__(self):
        pass

    @property
    def section(self):
        """
        INI section this object reads from.  property + setter guards
        against callers assigning an empty/invalid value.
        :return:
        """
        return self._section

    @section.setter
    def section(self, value):
        if value:
            self._section = value
        else:
            raise ValueError("ๆๅฎ็ section ๅฑๆงๅผๆ ๆใ")

    def xlist_add(self, prefix: str, key: str, section: str = ''):
        """Record 'prefix.key' under (section, prefix), creating the entry
        when it does not exist yet.

        Bug fix: the original appended a new entry only when the whole list
        was empty, so the first key of every additional (section, prefix)
        pair was silently dropped.
        """
        target_section = self._section if section == '' else section
        full_key = '%s.%s' % (prefix, key)
        for xlist in self._xlist:
            if xlist.section == target_section and xlist.node.prefix == prefix:
                if full_key not in xlist.node.keys:
                    xlist.node.keys.append(full_key)
                return
        # No existing entry for this (section, prefix): create one.
        self._xlist.append(HappyConfigXList(section=target_section,
                                            node=HappyConfigXListNode(prefix=prefix,
                                                                      keys=[full_key])))

    def xlist_key(self, prefix: str, section: str = '') -> list[str]:
        """Return the recorded 'prefix.key' names for (section, prefix),
        or an empty list when none were recorded."""
        target_section = self._section if section == '' else section
        for xlist in self._xlist:
            if xlist.section == target_section and xlist.node.prefix == prefix:
                return xlist.node.keys
        return []

    def xlist_get(self, prefix: str, key: str = ''):
        """Return the attribute stored under 'prefix[.key]', or None if unset."""
        key = prefix + ('.' + key if key else '')
        return self.__dict__[key] if key in self.__dict__ else None
class HappyConfigParser(object):
@staticmethod
def load(filename: str, happy_config_object: HappyConfigBase):
if not isinstance(happy_config_object, HappyConfigBase):
raise HappyPyException('happy_config_object ไธๆฏ HappyConfigBase ็ฑป็ๅญ็ฑปๅฏน่ฑกใ')
try:
if not os.path.exists(filename):
print("[Error] ้
็ฝฎๆไปถ %s ไธๅญๅจ" % filename)
exit(1)
with open(filename, 'r', encoding='UTF-8') as f:
content = f.read()
HappyConfigParser._loads(content, happy_config_object)
except Exception as e:
print("[Error] ้
็ฝฎๆไปถ่ฏปๅ้่ฏฏ๏ผ%s" % str(e))
exit(1)
@staticmethod
def _loads(content: str, happy_config_object: HappyConfigBase):
def set_attr(t, _name, _new_name):
if t is str:
v = cfg.get(section, _name)
elif t is int:
v = cfg.getint(section, _name)
elif t is bool:
v = cfg.getboolean(section, _name)
elif t is float:
v = cfg.getfloat(section, _name)
elif t is list:
v = cfg.get(section, _name).split(',')
else:
v = cfg.getboolean(section, _name)
setattr(happy_config_object, _new_name, v)
from configparser import RawConfigParser
if not isinstance(happy_config_object, HappyConfigBase):
raise HappyPyException('happy_config_object ไธๆฏ HappyConfigBase ็ฑป็ๅญ็ฑปๅฏน่ฑกใ')
try:
cfg = RawConfigParser()
cfg.read_string(content)
class_attrs = happy_config_object.__dict__
section = happy_config_object.section
for name, value in class_attrs.items():
if name == '_section':
continue
set_attr(type(value), name, name)
for section, section_obj in cfg.items():
if section == '_section':
continue
for name, value in section_obj.items():
if not name.startswith('!'):
continue
new_name = name[1:]
parts = new_name.split('.')
if len(parts) >= 2:
happy_config_object.xlist_add(section=section, prefix=parts[0], key=parts[1])
set_attr(type(value), name, new_name)
except Exception as e:
print("[Error] ้
็ฝฎๆไปถ่ฏปๅ้่ฏฏ๏ผ%s" % str(e))
exit(1)
@staticmethod
def load_with_var(filename: str, var_dict: dict, happy_config_object: HappyConfigBase):
try:
if not os.path.exists(filename):
print("[Error] ้
็ฝฎๆไปถ %s ไธๅญๅจ" % filename)
exit(1)
with open(filename, 'r', encoding='UTF-8') as f:
content = ''.join(f.readlines())
for var, value in var_dict.items():
content = content.replace('${%s}' % var, value)
HappyConfigParser._loads(content, happy_config_object)
except Exception as e:
print("[Error] ้
็ฝฎๆไปถ่ฏปๅ้่ฏฏ๏ผ%s" % str(e))
exit(1)
| [
"fifilyu@gmail.com"
] | fifilyu@gmail.com |
a883876441ca09c7cd5b24217de749b9af164355 | c3768b8bbf3c35bd6cc353b97a5c979239cbd8d2 | /String Reversal through stack.py | 245dd1cb686de9846c67b32a05fab84690107c82 | [] | no_license | iAnas19/Data-Structure-Assignments | 0fbd5b933b0045526c7937e54ace83f8ef4cbd3d | 51f876ce26807b49593fe5fa1438f37bcf081c9e | refs/heads/master | 2023-07-17T02:21:31.407895 | 2021-09-04T14:26:02 | 2021-09-04T14:26:02 | 402,455,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | Stack = []
def isEmpty(stack):
    """Return True when the stack holds no items.

    Bug fix: the original's else branch was a bare `False` expression with
    no `return`, so the function returned None for non-empty stacks and
    only worked because None happens to be falsy.
    """
    return stack == []


def push(stack, item):
    """Push item onto the top of the stack (in place)."""
    stack.append(item)


def pop(stack):
    """Pop and return the top item, or False when the stack is empty."""
    if isEmpty(stack):
        return False
    return stack.pop()


def reverse(stack):
    """Return the given string reversed, using an explicit character stack."""
    holding = []
    for ch in stack:
        push(holding, ch)
    result = ''
    while not isEmpty(holding):
        result += pop(holding)
    return result
# Interactive driver: repeatedly read a line and print it reversed.
# NOTE(review): the loop has no exit condition; Ctrl-C / EOF is the only
# way out.
while True:
    stack = input('Enter values you want to reverse: ')
    item = reverse(stack)
    print('Reversed values are %s' % item)
| [
"anasarif216@gmail.com"
] | anasarif216@gmail.com |
285153a37535e81f1417b628a44f7dafe148b5f3 | dc66c0cf24c5f741b6288f3d73e6436752432dad | /Backend/blog/views.py | 203ade87be2962509d27279ad4a6c6d0e529d7a3 | [
"MIT"
] | permissive | Linzecong/LPOJ | bdcf79f5e751419c0cff14c818512d5509fd849f | 2f7ce194f1d510d8d006c2a35fdaa272f20ef1f3 | refs/heads/master | 2023-01-20T15:42:12.865669 | 2022-01-05T15:05:55 | 2022-01-05T15:05:55 | 164,289,923 | 236 | 79 | MIT | 2023-01-14T00:55:14 | 2019-01-06T08:41:36 | Vue | UTF-8 | Python | false | false | 1,365 | py | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.throttling import ScopedRateThrottle
from .models import OJMessage, Blog,Banner
from .serializers import OJMessageSerializer, BlogSerializer,BannerSerializer
from .permission import ManagerOnly, UserRatingOnly
class BannerView(viewsets.ModelViewSet):
    """CRUD API over Banner records, newest first.

    Access is gated by the project-defined ManagerOnly permission; POST
    traffic shares the scoped 'post' throttle rate.
    """
    queryset = Banner.objects.all().order_by("-id")
    serializer_class = BannerSerializer
    filter_fields = ('time',)
    permission_classes = (ManagerOnly,)
    pagination_class = LimitOffsetPagination
    throttle_scope = "post"
    throttle_classes = [ScopedRateThrottle, ]
class OJMessageView(viewsets.ModelViewSet):
    """CRUD API over OJMessage records, newest first, filterable by
    username/time.

    Access is gated by the project-defined UserRatingOnly permission; POST
    traffic shares the scoped 'post' throttle rate.
    """
    queryset = OJMessage.objects.all().order_by("-id")
    serializer_class = OJMessageSerializer
    filter_fields = ('username', 'time')
    permission_classes = (UserRatingOnly,)
    pagination_class = LimitOffsetPagination
    throttle_scope = "post"
    throttle_classes = [ScopedRateThrottle, ]
class BlogView(viewsets.ModelViewSet):
    """CRUD API over Blog records, newest first, filterable by
    username/time.

    Access is gated by the project-defined ManagerOnly permission; POST
    traffic shares the scoped 'post' throttle rate.
    """
    queryset = Blog.objects.all().order_by("-id")
    serializer_class = BlogSerializer
    filter_fields = ('username', 'time')
    pagination_class = LimitOffsetPagination
    permission_classes = (ManagerOnly,)
    throttle_scope = "post"
    throttle_classes = [ScopedRateThrottle, ]
| [
"504603913@qq.com"
] | 504603913@qq.com |
22d902c49c825a300ee0d725ec6a94278a89d037 | da96d86eac60f0148b58c1a2a35d26981806a85a | /analysis/predictor_eval/metrics_disprot/metrics_disprot.py | 3016a0c83218bbbf64face6d288f9180542decb7 | [] | no_license | QiWenL/predIDR | 4d0efdfc38ff05f269701a91e76e478451d96533 | 7f5dde489fc12afdea45d3da32a136f907a7b60b | refs/heads/master | 2023-07-18T16:03:03.580240 | 2021-09-01T16:33:31 | 2021-09-01T16:33:31 | 298,642,410 | 0 | 0 | null | 2020-09-25T17:48:12 | 2020-09-25T17:48:11 | null | UTF-8 | Python | false | false | 1,711 | py | """Calculate metrics for all predictors against the DisProt dataset."""
import src.metrics as metrics
y_label_paths = [('aucpred_profile', '../../predictor_eval/aucpredp_disprot/out/aucpredp_labels.fasta'),
('aucpred_seq', '../../predictor_eval/aucpreds_disprot/out/aucpreds_labels.fasta'),
('deepcnfd', '../../predictor_eval/deepcnfd_disprot/out/deepcnfd_labels.fasta'),
('espritz_profile', '../../predictor_eval/espritzp_disprot/out/espritzp_labels.fasta'),
('espritz_seq', '../../predictor_eval/espritzs_disprot/out/espritzs_labels.fasta'),
('iupred2a', '../../predictor_eval/iupred2a_disprot/out/iupred2a_labels.fasta'),
('disopred3', '../../predictor_eval/disopred3_disprot/out/disopred3_labels.fasta')]
y_score_paths = [('aucpred_profile', '../../predictor_eval/aucpredp_disprot/out/aucpredp_scores.fasta'),
('aucpred_seq', '../../predictor_eval/aucpreds_disprot/out/aucpreds_scores.fasta'),
('deepcnfd', '../../predictor_eval/deepcnfd_disprot/out/deepcnfd_scores.fasta'),
('espritz_profile', '../../predictor_eval/espritzp_disprot/out/espritzp_scores.fasta'),
('espritz_seq', '../../predictor_eval/espritzs_disprot/out/espritzs_scores.fasta'),
('iupred2a', '../../predictor_eval/iupred2a_disprot/out/iupred2a_scores.fasta'),
('disopred3', '../../predictor_eval/disopred3_disprot/out/disopred3_scores.fasta')]
metrics.main('../../disprot_validation/format_seqs/out/disprot_labels.fasta', '>disprot_id:(DP[0-9]+)|',
y_label_paths=y_label_paths, y_score_paths=y_score_paths, visual=True)
| [
"marcsingleton@berkeley.edu"
] | marcsingleton@berkeley.edu |
dbfc45ee797d3708c6e32e2317b9d91c3383dcc9 | ee0403cb47394a3dfe78f41e674fd7146262c805 | /src/main/python/pitInput.py | 125b6e4aeb902b48fa3edb7c4216e07d4ad7bdee | [] | no_license | BREAD5940/ScoutingBase | 0d9fa2bf04ec1e1cc1b0306e6be0dd5f0b151425 | 558b83b4c1b3dfe4de87a07baedb178a1adf932a | refs/heads/master | 2020-04-30T20:39:04.250657 | 2019-11-03T23:57:50 | 2019-11-03T23:57:50 | 177,072,958 | 1 | 2 | null | 2019-10-07T00:33:44 | 2019-03-22T04:47:36 | Java | UTF-8 | Python | false | false | 8,545 | py | import PySimpleGUI as sg
# import main
# as dumb as it sounds, this is linked in with the rest of the base. line 165 (ish)
robotColumnTwoElectricBoogaloo = [
[sg.Text(' '), sg.Checkbox('Climbs')],
[sg.Text("\t\tLevel:", size=(19, 1)), sg.Radio('2', 'LVL'), sg.Radio('3', 'LVL'), sg.Radio('Assist', 'LVL')],
[sg.Text('\tType of intake:', size=(18, 1)), sg.Checkbox('Cargo'), sg.Checkbox('Hatch')],
[sg.Text('\tRocket Reach: '), sg.Radio('1', 'PLACE'), sg.Radio('2', 'PLACE'), sg.Radio('3', 'PLACE')],
[sg.Text('\tMechanical Issues: '), sg.Slider(range=(0,5), orientation='h', default_value=1)],
[sg.Text(' '), sg.Checkbox('Has camera')],
[sg.Text(' '), sg.Checkbox('Has sensor/limelight')],
[sg.Text(' '), sg.Checkbox('Uses presets')],
[sg.Text('\tCenter of Gravity: '), sg.Radio('High', 'COG'), sg.Radio('Low', 'COG'), sg.Radio('Unknown', 'COG')],
[sg.Text(' '), sg.Checkbox('Can reach over cargo')],
[sg.Text(' '), sg.Checkbox('Rampbot')],
[sg.Text('\tRobot nicknames (comma-seperated): ')],
[sg.Text('\t '), sg.InputText()]
]
robotColumn = [
[sg.Text('Robot')],
[sg.Column(robotColumnTwoElectricBoogaloo)]
]
sandColumn = [
[sg.Text('\tSandstorm')],
[sg.Text('\t\tStarting Hab: '), sg.Radio('1', 'START'), sg.Radio('2', 'START')],
[sg.Text('\t\tMain Strategy: ')],
# yes, I manually made this column. please shoot me.
[sg.Text('\t\t '), sg.Checkbox('Hatch on close side rocket')],
[sg.Text('\t\t '), sg.Checkbox('Hatch on far side rocket')],
[sg.Text('\t\t '), sg.Checkbox('Hatch on front cargo ship')],
[sg.Text('\t\t '), sg.Checkbox('Hatch somewhere else on the cargo ship')],
[sg.Text('\t\t '), sg.Checkbox('Cargo in cargo ship')],
[sg.Text('\t\t '), sg.Checkbox('Just across the line')],
[sg.Text('\t\t '), sg.Checkbox('Multi game piece (plz give notes)')],
[sg.Text('\t\t '), sg.Checkbox('Other (describe in notes)')],
[sg.Text('\t\t Notes: '), sg.Multiline()],
[sg.Text('\t\tControl: '), sg.Radio('Driver + camera', 'CTRL'), sg.Radio('Auto', 'CTRL')],
]
teleColumn = [
[sg.Text('\tMain Teleop Strategy: ')],
# yes, I manually made this column. please shoot me.
[sg.Text('\t '), sg.Checkbox('Ship cargo and hatch')],
[sg.Text('\t '), sg.Checkbox('Ship cargo')],
[sg.Text('\t '), sg.Checkbox('Ship hatch')],
[sg.Text('\t '), sg.Checkbox('Rocket cargo and hatch')],
[sg.Text('\t '), sg.Checkbox('Rocket cargo')],
[sg.Text('\t '), sg.Checkbox('Rocket hatch')],
[sg.Text('\t '), sg.Checkbox('Mixed rocket/ship (give notes)')],
[sg.Text('\t '), sg.Checkbox('Defense')],
[sg.Text('\t '), sg.Checkbox('Flexible (give notes)')],
[sg.Text('\t '), sg.Checkbox('Other (describe in notes)')],
[sg.Text('\t Notes: '), sg.Multiline()],
]
otherStratColumn = [
[sg.Text('\tGame Pieces per Match: '), sg.Slider(range=(0,40), orientation='h'), sg.Text('\tCycle Time (average): '), sg.Slider(range=(1,40), orientation='h')],
[sg.Text('\tPrefered Game Piece: '), sg.Checkbox('Hatch'), sg.Checkbox('Cargo')]
]
stratColumn = [
[sg.Text('Strategy')],
[sg.Column(sandColumn), sg.Column(teleColumn)],
[sg.Column(otherStratColumn)]
]
hpPrefsColumn = [
[sg.Text('\tHuman Player: ')],
[sg.Text('\t '), sg.Checkbox('Relies on')],
[sg.Text('\t '), sg.Checkbox('Would like, but flexible')],
[sg.Text('\t '), sg.Checkbox('No preference')]
]
stratPrefsColumn = [
[sg.Text('\tStrategy:')],
[sg.Text('\t '), sg.Checkbox('Has a strong strategy, sticks to it')],
[sg.Text('\t '), sg.Checkbox('Would prefer their own, but can switch')],
[sg.Text('\t '), sg.Checkbox('Very flexible')]
]
prefsColumn = [
[sg.Text('Preferences')],
[sg.Column(hpPrefsColumn), sg.Column(stratPrefsColumn)]
]
notesColumn = [
[sg.Text('Notes: ')],
[sg.Multiline(size=(100, 10))]
]
layout = [
[sg.Text('Team Number: '), sg.InputText(), sg.Text('Team Name: '), sg.InputText()],
[sg.Column(robotColumn, background_color='#e8e6e5'), sg.Column(stratColumn, background_color='#e8e6e5')],
[sg.Column(prefsColumn, background_color='#e8e6e5'), sg.Column(notesColumn, background_color='#e8e6e5')],
[sg.Submit(), sg.Cancel()]
]
window = sg.Window('Pit Scouting Data Entry', resizable=True).Layout(layout)
button, values = window.Read()
'''
0: number
1: name
2: Climbs (t/f)
3: Climb level 2
4: climb level 3
5: climb assist
6: cargo intake
7: hatch intake
8: reach level 1
9: reach level 2
10: reach level 3
11: mechanical issues (float)
12: camera
13: sense/lime
14: presets
15: cog high
16: cog low
17: cog unknown
18: long arm
19: rampy boi
20: nicknames
21: level 1 start
22: level 2 start
23: hatch close rocket
24: hatch far rocket
25: hatch front
26: hatch cargo
27: cargo
28: line
29: multi
30: other
31: sandstorm strat notes
32: driver + cam
33: auto
34: ship c + h
35: ship c
36: ship h
37: rocket c+h
38: rocket c
39: rocket h
40: mixed rocket/ship
41: defense
42: flexible
43: other
44: teleop strat notes
45: gps per match
46: av cycle time
47: pref h
48: pref c
49: relies on hp
50: would like hp
51: no pref on hp
52: strong strat
53: pref strat
54: flex strat
55: notes
'''
print(values)
i=0
while i < len(values):
if isinstance(values[i], str):
values[i] = values[i].replace('\n', ' ')
i=i+1
climbLvl = 1
reachLvl=0
cog='u'
startLvl=1
sandStrat=[]
autoDrive=False
teleStrat=[]
gpPref=[]
humanPrefs=[]
stratPrefs=[]
if values[3]:
climbLvl=2
elif values[4]:
climbLvl=3
if values[8]:
reachLvl=1
elif values[9]:
reachLvl=2
elif values[10]:
reachLvl=3
if values[15]:
cog='h'
elif values[16]:
cog='l'
elif values[17]:
cog='u'
if values[21]:
startLvl=1
elif values[22]:
startLvl=2
if values[23]:
sandStrat.append("hatch close side rocket")
if values[24]:
sandStrat.append("hatch far side rocket")
if values[25]:
sandStrat.append("hatch front cargo ship")
if values[26]:
sandStrat.append("hatch cargo ship")
if values[27]:
sandStrat.append("cargo cargo ship")
if values[28]:
sandStrat.append("across hab line")
if values[29]:
sandStrat.append("god-tier multi piece")
if values[30]:
sandStrat.append("other")
if values[32]:
autoDrive=False
elif values[33]:
autoDrive=True
if values[34]:
teleStrat.append("cargo and hatch cargo ship")
if values[35]:
teleStrat.append("cargo cargo ship")
if values[36]:
teleStrat.append("hatch cargo ship")
if values[37]:
teleStrat.append("cargo and hatch rocket")
if values[38]:
teleStrat.append("cargo rocket")
if values[39]:
teleStrat.append("hatch rocket")
if values[40]:
teleStrat.append("rocket/cargo ship mixed")
if values[41]:
teleStrat.append("defense")
if values[42]:
teleStrat.append("flexible")
if values[43]:
teleStrat.append("other")
if values[47]:
gpPref.append("h")
if values[48]:
gpPref.append("c")
if values[49]:
humanPrefs.append("relies on")
if values[50]:
humanPrefs.append("would like")
if values[51]:
humanPrefs.append("no pref")
if values[52]:
stratPrefs.append("strong plan")
if values[53]:
stratPrefs.append("prefers their plan")
if values[54]:
stratPrefs.append("flexible on plan")
# outFile = open(main.dataDirectory+"pitData.csv", "a")
# outFile.write(str(values[0])+","+str(values[2])+","+str(climbLvl)+","+str(values[5])+","+str(values[6])+","+str(values[7])+","+str(reachLvl)+","+str(values[11])+","+str(values[12])+","+str(values[13])+","+str(values[14])+","+cog+","+str(values[18])+","+str(values[19])+","+str(startLvl)+","+str(sandStrat)+","+"|"+str(values[31])+"|"+","+str(autoDrive)+","+str(teleStrat)+","+"|"+str(values[44])+"|"+","+str(values[45])+","+str(values[46])+","+str(gpPref)+","+str(humanPrefs)+","+str(stratPrefs)+","+"|"+str(values[20])+"|"+","+"|"+str(values[55])+"|\n")
| [
"45951442+unduli-attacked@users.noreply.github.com"
] | 45951442+unduli-attacked@users.noreply.github.com |
36d766acb64d266f4988a64145c619c6d89a0910 | 17331ee8285a1f19e4ca1abd89dac64da381959d | /03-accessing-web-data/reading-webpages.py | 537e39244e68328b3514cbd9f43c78a7595785c4 | [] | no_license | chaochaocodes/PY4E | 3681367ce548fe9a423adb895fe76efda60521bb | 09930f6187c3388b61903680bcd4a1533b0b4f82 | refs/heads/main | 2023-03-28T11:29:09.209120 | 2021-04-01T02:34:58 | 2021-04-01T02:34:58 | 333,506,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | '''
Reading Webpages like Files using urllib
'''
import urllib.request, urllib.parse, urllib.error
# 1. Read like a File
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
for line in fhand:
print(line.decode().strip())
# reads the HTML file!
# returns header + body, but header not returned in this for loop; accessed another way
# 2. Working with the data. Retrieve and find frequency of words
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
counts = dict()
for line in fhand:
words = line.decode().split()
# line is a byte string, decode into character string
for word in words:
counts[word]: counts.get(word,0) + 1
print(counts)
# array of words, count and save in dict
| [
"57464564+chaochaocodes@users.noreply.github.com"
] | 57464564+chaochaocodes@users.noreply.github.com |
5e15489107b3c51fb2cfad091143fbf0e6ceb0fc | 9009ad47bc1d6adf8ee6d0f2f2b3125dea44c0aa | /cf-540-a.py | 44dd74a1dc85a6de815348507461f004dcdbb3da | [] | no_license | luctivud/Coding-Trash | 42e880624f39a826bcaab9b6194add2c9b3d71fc | 35422253f6169cc98e099bf83c650b1fb3acdb75 | refs/heads/master | 2022-12-12T00:20:49.630749 | 2020-09-12T17:38:30 | 2020-09-12T17:38:30 | 241,000,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | # JAI SHREE RAM
import math; from collections import *
import sys; from functools import reduce
# sys.setrecursionlimit(10**6)
def get_ints(): return map(int, input().strip().split())
def get_list(): return list(get_ints())
def get_string(): return list(input().strip().split())
def printxsp(*args): return print(*args, end="")
def printsp(*args): return print(*args, end=" ")
UGLYMOD = int(1e9)+7; SEXYMOD = 998244353; MAXN = int(1e5)
# sys.stdin=open("input.txt","r");sys.stdout=open("output.txt","w")
# for _testcases_ in range(int(input())):
n = int(input())
s = input()
t = input()
ans = 0
for i in range(n):
first = int(s[i])
secon = int(t[i])
diff = max(first, secon) - min(first, secon)
ans += min(diff, 10-diff)
print(ans)
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
''' | [
"luctivud@gmail.com"
] | luctivud@gmail.com |
0f320858fc3f8e67f6f3bf1cd4c3a96c6ca87654 | df4729527451f50435e564d2e01ad95dab5d5c38 | /booster/datastruct/__init__.py | 22630ff113d550bb283535ae4a50793bedef54e1 | [
"MIT"
] | permissive | sajadn/booster-pytorch | 5d97a21a6a84efe99c80d31fb154fabb93347628 | a8f447160c30224112731a25f90f6f97126a34b2 | refs/heads/master | 2022-11-06T05:52:51.168983 | 2020-06-22T20:25:24 | 2020-06-22T20:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | from .aggregator import Aggregator
from .diagnostic import Diagnostic | [
"valentin.lievin@gmail.com"
] | valentin.lievin@gmail.com |
f7881b2609d4092aa8e483ad9b8bc0d585901f87 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/project/maps/data/__init__.py | 41d0dcae71d4be63e17e6aabe7e0795053028508 | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 3,054 | py | import collections
import os
from abstractions import *
import data.jsonl
DATA_DIRECTORY = "data"
USER_DIRECTORY = "users"
def load_data(user_dataset, review_dataset, restaurant_dataset):
with open(os.path.join(DATA_DIRECTORY, user_dataset)) as f:
user_data = jsonl.load(f)
with open(os.path.join(DATA_DIRECTORY, review_dataset)) as f:
review_data = jsonl.load(f)
with open(os.path.join(DATA_DIRECTORY, restaurant_dataset)) as f:
restaurant_data = jsonl.load(f)
# Load users.
userid_to_user = {}
for user in user_data:
name = user["name"]
_user_id = user["user_id"]
user = make_user(name, []) # MISSING: reviews
userid_to_user[_user_id] = user
# Load restaurants.
busid_to_restaurant = {}
for restaurant in restaurant_data:
name = restaurant["name"]
location = float(restaurant["latitude"]), float(restaurant["longitude"])
categories = restaurant["categories"]
price = restaurant["price"]
if price is not None:
price = int(price)
num_reviews = int(restaurant["review_count"])
_business_id = restaurant["business_id"]
restaurant = make_restaurant(name, location, categories, price, []) # MISSING: reviews
busid_to_restaurant[_business_id] = restaurant
# Load reviews.
reviews = []
busid_to_reviews = collections.defaultdict(list)
userid_to_reviews = collections.defaultdict(list)
for review in review_data:
_user_id = review["user_id"]
_business_id = review["business_id"]
restaurant = restaurant_name(busid_to_restaurant[_business_id])
rating = float(review["stars"])
review = make_review(restaurant, rating)
reviews.append(review)
busid_to_reviews[_business_id].append(review)
userid_to_reviews[_user_id].append(review)
# Reviews done.
restaurants = {}
for busid, restaurant in busid_to_restaurant.items():
name = restaurant_name(restaurant)
location = list(restaurant_location(restaurant))
categories = restaurant_categories(restaurant)
price = restaurant_price(restaurant)
restaurant_reviews = busid_to_reviews[busid]
restaurant = make_restaurant(name, location, categories, price, restaurant_reviews)
restaurants[name] = restaurant
# Restaurants done.
users = []
for userid, user in userid_to_user.items():
name = user_name(user)
user_reviews = userid_to_reviews[userid]
user = make_user(name, user_reviews)
users.append(user)
# Users done.
return users, reviews, list(restaurants.values())
USERS, REVIEWS, ALL_RESTAURANTS = load_data("users.json", "reviews.json", "restaurants.json")
CATEGORIES = {c for r in ALL_RESTAURANTS for c in restaurant_categories(r)}
def load_user_file(user_file):
with open(os.path.join(USER_DIRECTORY, user_file)) as f:
return eval(f.read())
import glob
USER_FILES = [f[6:-4] for f in glob.glob("users/*.dat")]
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
a38a7b826119a66c0fcc3963011aa4e1dcb57c07 | 5db480d7addf69f16c0dffd6b4ad3200503d28c9 | /portfolio/models.py | c911dbad0454dbe6babb12eab348b3e4a8603685 | [] | no_license | sgupta117/portfolio_app | e32683848289dfde04a4b1294dc856f584b23514 | 9408c0eca15f6d5496a9aedf823040f21875a7e2 | refs/heads/master | 2023-01-01T06:37:09.432621 | 2020-10-26T19:34:23 | 2020-10-26T19:34:23 | 301,767,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from django.db import models
class Projects:
id:int
img: str
name: str
desc: str
liveDemo: str
codeLink: str
class Blogs:
id:int
img:str
name:str
desc:str
link:str
| [
"kamalpant2024@gmail.com"
] | kamalpant2024@gmail.com |
28eb45f482d06d92de0f4c0516e1235d4457e02e | bbc917543e639deaf0e01bed83dc2b21b45e6c79 | /IzVerifier/izspecs/izproperties.py | b93536327bd3c9f085d04d564407f45989163fa2 | [
"MIT"
] | permissive | Python3pkg/IzVerifier | 904f27e2ced718d01082590146bc117df65f1e90 | 124c68baf66ee4978d824af376859190ec2d6ed5 | refs/heads/master | 2021-01-21T17:37:29.970116 | 2017-05-21T15:38:37 | 2017-05-21T15:38:37 | 91,968,090 | 0 | 0 | null | 2017-05-21T15:38:35 | 2017-05-21T15:38:35 | null | UTF-8 | Python | false | false | 2,650 | py | from bs4 import BeautifulSoup
import re
__author__ = 'fcanas'
class IzProperties(dict):
"""
Responsible for parsing and containing any properties used by IzPack's installation spec files.
"""
def __init__(self, path):
"""
Initialize paths to properties and begin parsing.
"""
# noinspection PyTypeChecker
dict.__init__(self)
if 'pom.xml' in path:
self.parse_pom_properties(path)
else:
self.parse_properties(path)
def parse_properties(self, path):
"""
Finds properties defined in properties file at specified path adds them to map.
"""
soup = BeautifulSoup(open(path, 'r'))
properties = soup.find_all('properties')
for props in properties:
for prop in props.find_all('property'):
try:
self[prop['name']] = prop['value']
except KeyError:
continue
def parse_pom_properties(self, path):
"""
Special parser for pom.xml file properties.
"""
soup = BeautifulSoup(open(path, 'r'), 'xml')
properties = soup.find_all('properties')
# add the basedir property
self['basedir'] = path.replace('pom.xml', '')
for props in properties:
for prop in props.find_all(recursive=False):
try:
self[str(prop.name)] = str(prop.string)
except KeyError:
continue
def substitute(self, string):
"""
Puts the given string through variable substitution: replacing all incidences of
${key} for the key's value if it exists. If key doesn't exist, it returns
the unsubstituted variable. The substitution is performed iteratively until all
possible variables have been subbed.
"""
while True:
old_string = string
matches = re.findall('\$\{.*\}', string)
if not matches:
break
for match in matches:
value = self._substitute(match)
if not value is match:
string = str.replace(string, match, value)
if string is old_string:
break
return string
def _substitute(self, key):
"""
Substitutes a given key for its value. If the value doesn't exist,
return the key.
Key is in the form ${some.key}
"""
stripped_key = key[2:-1]
if stripped_key in self:
return self[stripped_key]
else:
return key
| [
"fcanas@redhat.com"
] | fcanas@redhat.com |
04caf4c0da09c56e9866004490cb2a798b19d1f9 | 20fae8a286f151f91311b3611cd8c1cb7d314892 | /predictor.py | b731b799de42b1d6080e80b466a1258c9a565079 | [] | no_license | Justus-Nithushan-mitrai/Neural-style-transfer | 5f3b8173cd41e0130de4ec37fc8a6a2fcca20147 | 0b3bea5bdf977b9c658af2483b33b2488ac20dbe | refs/heads/master | 2020-04-10T21:17:56.127248 | 2018-12-11T07:14:18 | 2018-12-11T07:14:18 | 161,292,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,134 | py | import io
import os
import flask
import json
import pandas as pd
import numpy
from flask import request
from flask import send_file
import base64
from keras.models import load_model
import pickle
from io import StringIO
import sys
import signal
import traceback
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
import keras
app = flask.Flask(__name__)
CONTENT_IMAGE = None
STYLE_IMAGE = None
# Image size
IMAGE_SIZE = 500
# Loss Weights
CONTENT_WEIGHT = 0.025
STYLE_WEIGHT = 1.0
STYLE_SCALE = 1.0
TOTAL_VARIATION_WEIGHT = 8.5e-5
CONTENT_LOSS_TYPE = 0
# Training arguments
NUM_ITERATIONS = 1
MODEL = 'vgg19'
RESCALE_IMAGE = 'false'
MAINTAIN_ASPECT_RATIO = 'false' # Set to false if OOM occurs
# Transfer Arguments
CONTENT_LAYER = 'conv' + '5_2' # only change the number 5_2 to something in a similar format
INITIALIZATION_IMAGE = 'content'
POOLING_TYPE = 'max'
# Extra arguments
PRESERVE_COLOR = 'false'
MIN_IMPROVEMENT = 0.0
FINAL_IMAGE_PATH = "/opt/ml/gen_at_iteration_%d.png" % (NUM_ITERATIONS)
RESULT_PREFIX = "/opt/ml/gen"
INPUT_IMAGE_PREFIX = "/opt/ml/"
content_img = INPUT_IMAGE_PREFIX + "image1.jpg"
style_img = INPUT_IMAGE_PREFIX + "image2.jpg"
# class ScoringService(object):
# model = None # Where we keep the model when it's loaded
# @classmethod
# def get_model(cls):
# """Get the model object for this instance, loading it if it's not already loaded."""
# if cls.model == None:
# cls.model = load_model('/opt/ml/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')
# cls.model._make_predict_function()
# return cls.model
# @classmethod
# def predict(cls, input):
# """For the input, do the predictions and return them.
# Args:
# input (a pandas dataframe): The data on which to do the predictions. There will be
# one prediction per row in the dataframe"""
# clf = cls.get_model()
# return clf.predict(input)
@app.route('/ping', methods=['GET'])
def ping():
# print("Ping endpoint has been invoked")
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
# health = ScoringService.get_model() is not None # You can insert a health check here
print(keras.backend.tensorflow_backend._get_available_gpus())
# status = 200 if health else 404
return flask.Response(response='\n', status=200, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def generateImage():
print("invocations endpoint has been invoked")
if (flask.request.content_type.split('/')[0]=="multipart") and ('form' in flask.request.content_type.split('/')[1]):
print("The file is multipart/form-data")
if 'content' in request.files:
CONTENT_IMAGE = request.files['content']
CONTENT_IMAGE.save(content_img)
x = True
else:
x = False
if 'style' in request.files:
STYLE_IMAGE = request.files['style']
STYLE_IMAGE.save(style_img)
y = True
else:
y = False
if (x & y) == False :
return flask.Response(response="Require two images", status=400, mimetype='text/plain')
elif (flask.request.content_type.split('/')[0]=="application") and (flask.request.content_type.split('/')[1]=="json"):
print("The file is application/json")
if 'content' in request.get_json():
CONTENT_DATA = request.get_json()['content']
CONTENT_IMAGE = base64.b64decode(CONTENT_DATA)
with open(content_img, 'wb') as f:
f.write(CONTENT_IMAGE)
x = True
else:
x = False
if 'style' in request.get_json():
STYLE_DATA = request.get_json()['style']
STYLE_IMAGE = base64.b64decode(STYLE_DATA)
with open(style_img, 'wb') as f:
f.write(STYLE_IMAGE)
y = True
else:
y = False
if (x & y) == False :
return flask.Response(response="Require two images", status=400, mimetype='text/plain')
else:
return flask.Response(response='This supports application/json and multipart/form-data', status=200, mimetype='text/plain')
os.system("python3 Network.py "+ content_img +" "+style_img +" "+RESULT_PREFIX+ " --image_size "+str(IMAGE_SIZE)+ " --content_weight "+str(CONTENT_WEIGHT)+ " --style_weight "+str(STYLE_WEIGHT)+ " --style_scale "+str(STYLE_SCALE)+" --total_variation_weight "+str(TOTAL_VARIATION_WEIGHT)+" --content_loss_type "+str(CONTENT_LOSS_TYPE)+" --num_iter "+str(NUM_ITERATIONS)+" --model "+MODEL+" --rescale_image "+RESCALE_IMAGE+" --maintain_aspect_ratio "+MAINTAIN_ASPECT_RATIO+" --content_layer "+CONTENT_LAYER+" --init_image "+INITIALIZATION_IMAGE+" --pool_type "+POOLING_TYPE+" --preserve_color "+PRESERVE_COLOR+" --min_improvement "+str(MIN_IMPROVEMENT))
return send_file(FINAL_IMAGE_PATH, mimetype='image/png') | [
"noreply@github.com"
] | noreply@github.com |
81b2518e870ffcb3af3a4398b5110cde62a69870 | 86d165d547ca57f8be675c6d60cbe943bd0330e2 | /consumers/faust_stream.py | 0a9b0ab66d2b6b013f6db7e0d67b2a79bbf1a479 | [] | no_license | guhgg/optimizing-public-transportation-udacity | c557831cef32f40eeed01e1650ad2778d0b9b746 | a93e60c4260a8a9d0f1732fb608da39533e1e22d | refs/heads/master | 2023-04-16T16:17:13.392122 | 2021-03-28T18:22:37 | 2021-03-28T18:22:37 | 352,405,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import logging
import faust
logger = logging.getLogger(__name__)
class Station(faust.Record):
stop_id: int
direction_id: str
stop_name: str
station_name: str
station_descriptive_name: str
station_id: str
order: int
red: bool
blue: bool
green: bool
class TransformedStation(faust.Record):
station_id: int
station_name: str
order: int
line: str
app = faust.App("stations-stream", broker="kafka://localhost:9092", store="memory://")
topic = app.topic("org.chicago.cta.stations", value_type=Station)
out_topic = app.topic("org.chicago.cta.stations.table.v1", partitions=1)
table = app.Table(
name="org.chicago.cta.stations.table.v1",
default=TransformedStation,
partitions=1,
changelog_topic=out_topic
)
@app.agent(topic)
async def station_event(events):
async for e in events:
if e.red:
line = 'red'
elif e.green:
line = 'green'
else:
line = 'blue'
transformed_station = TransformedStation(e.station_id, e.stop_name, e.orderm line)
table[transformed_station.station_id] = transformed_station
if __name__ == "__main__":
app.main()
| [
"gustavo42.sg@gmail.com"
] | gustavo42.sg@gmail.com |
e87d996a02ddd0d6019da925d9e3fd743ff87f67 | 16f2e819ff0483f498428f9cd2dd81ca5515d5f8 | /image_processing/jpg_conversion.py | 515b9f1a5b025952da2265e96f6c5d5da3ea214f | [] | no_license | biosemantics/Authors-Utilities | 5253537581573790b0e2ab8bae29b56b4674eaba | 2ab8f353cf5ba71dd9e3fe670111566e7025e3fb | refs/heads/master | 2023-04-23T03:15:19.435920 | 2021-05-12T18:09:12 | 2021-05-12T18:09:12 | 295,469,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | from PIL import Image
import os
import shutil
directory = os.getcwd() + '/full_res/'
for filename in os.listdir(directory):
if filename.endswith('.png'):
print(filename)
img = Image.open(directory + filename)
img.load() # required for img.split()
# replace alpha channel background
background = Image.new("RGB", img.size, (255, 255, 255))
background.paste(img, mask=img.split()[3]) # 3 is the alpha channel
# save RBG image as jpg
old_name = str(os.path.splitext(filename)[0])
background.save(directory + old_name + '.jpg', 'JPEG', quality=100)
# move .png file to processed/ (no longer needed)
source = str(directory) + str(filename)
destination = str(os.getcwd()) + '/processed/' + str(filename)
shutil.move(source, destination)
# success message
print(filename + ' converted to jpg') | [
"nwgiebink@gmail.com"
] | nwgiebink@gmail.com |
488762cf517b1c0a114fa3e63e1b519f905591ff | 3bcf2e79a5739f214f2d35c2543307ad99935a49 | /FirstPythonEnv/Scripts/easy_install-3.8-script.py | 1b346c49698b9ebed3221aff92428a15172e9489 | [] | no_license | ap1536/FirstPython | f30b5aed8f09851f834cc15464ef3441539ce867 | 076c059ae95e3d0fe6b8f070398510a49f62737c | refs/heads/master | 2020-08-22T05:09:24.963214 | 2019-10-20T17:52:09 | 2019-10-20T17:52:09 | 216,323,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!C:\Users\apabbathi2\PycharmProjects\FirstPython\FirstPythonEnv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| [
"aravind.pabbathi@gmail.com"
] | aravind.pabbathi@gmail.com |
ac553a17ecdc2e6c6a318f37d66896296ee0679b | 445098fb6d20f7280156119586cf7c4f07cbafa9 | /Python/stringmanipulation.py | 54c78749b4d411bdd77777cfcb1dd6d5e18cc66a | [] | no_license | Patwaris/DataScience | e2b1bd86a927db9efe6f701b6c5c830c6af2dda7 | 3a73acb1c1d1ed5b186e427d6fcda3842a9d4f1a | refs/heads/master | 2020-05-18T15:54:09.998438 | 2019-05-29T03:32:03 | 2019-05-29T03:32:03 | 184,512,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 3 08:45:49 2019
@author: BharathPatwari
"""
#Strings
name1="Bharath Patwari"
name2='abc'
print(type(name1))
print(type(name2))
#modify string content
print(name1[1])
name1[0]='A' #'str' object does not support item assignment
name2=name1+'Hyd'
print(name2)
name1=name1.upper()
print(name1)
#replace
name1=name1.replace('BHARATH','Vivek')
name1=10
isinstance(name1,str)
isinstance(name1,int) | [
"bharath35@gmail.com"
] | bharath35@gmail.com |
d3c366292f09c31949649f09f59f18df63e790be | 1cad3fa574350c9be29282f518f4927efb26e18f | /http_api/api_intro.py | 0c1edd60bc8199386fc7a5c103448f64018fc4dc | [] | no_license | EvgeniyBudaev/python_learn | c72fdc2c5a84dae03bfd6e5afc5453b795ada17f | 1a5385e3412832dd9017536dad1140138143600e | refs/heads/main | 2023-05-27T04:41:50.754525 | 2021-05-27T02:11:18 | 2021-05-27T02:11:18 | 337,162,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import requests
# url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&starttime=2014-01-01&endtime=2014-01-02'
# response = requests.get(url, headers={'Accept':'application/json'})
url = 'https://earthquake.usgs.gov/fdsnws/event/1/query?'
response = requests.get(url, headers={'Accept':'application/json'}, params={
'format': 'geojson',
'starttime': '2014-01-01',
'endtime': '2014-01-02'
})
# print(response.text)
# print(response.json())
# print(type(response.json())) # dict
data = response.json()
print(data['features'][0]['properties']['place']) | [
"49036840+EvgeniyBudaev@users.noreply.github.com"
] | 49036840+EvgeniyBudaev@users.noreply.github.com |
ca2d6c48dc06478aaaafd6c9257868331e9f456c | 4682536471f05076557f7f76f8a8dabad2bb135d | /alambre/polls/models.py | 1364143bc36fb6ec53642ca4717543fe71388409 | [] | no_license | alamtellez/Actividades-Django | c3e876e8c2ffd0a21846a2b26e4cdf10c1dbfcf3 | a71f29db14ac43058c9912a821b250b7ad681f5e | refs/heads/master | 2021-06-10T12:51:09.107970 | 2017-02-14T00:15:55 | 2017-02-14T00:15:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text | [
"A01205569@itesm.mx"
] | A01205569@itesm.mx |
556bda8140b851daec5eec1f491991432059f048 | a350534eaff612558bb2dd116f2fb924259b9938 | /.ipynb_checkpoints/orphanfinalfind_2-checkpoint.py | 39621fb9d0d7eb6743a552b82ce81ed9cb66860b | [] | no_license | zhengyun0215/mock | e8be8cb5b0efab7f74d49eeaa818deb04641480d | 211b303740df55b38a8c0c603178fb7ed23f5d5e | refs/heads/master | 2023-04-09T16:50:19.052204 | 2020-12-01T07:39:46 | 2020-12-01T07:39:46 | 312,835,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | import h5py
import numpy as np
import time
import multiprocessing as mp
def selection(x):
print("Start! :)")
start = time.time()
print('The snapshot id is %s'%x)
orpnum = np.loadtxt('/home/yunzheng/mock/orphan_new/orphantabel_new/snapshot_%s/fsnapnum.txt'%x)
vel_ratio = np.load('/home/yunzheng/mock/orphan/vel_ratio.npy')
t_infall = np.loadtxt('/home/yunzheng/mock/orphan_new/t_infall_new/t_infall_%s.txt'%x)
orpnum = np.array(orpnum,dtype = int)
constant = (0.94*(0.51**0.6)+0.6)/0.86
print(constant)
orphantable_final = []
orphan_final = [[]for _ in range(100)]
fsnap_final = np.zeros(100)
m = 0
for i in range(100):
if orpnum[i] > 0:
j = 0
Nsnap = i
print("******Snapshot******:%d"%Nsnap)
#่ฏปๅNsnap่ฟไธชsnapshot็็ธๅ
ณๆไปถ
sub_file = h5py.File('/home/cossim/CosmicGrowth/6610/subcat2/SubSnap_%03d.hdf5'%(i),'r')['Subhalos'][...]
sub_file_1 = h5py.File('/home/cossim/CosmicGrowth/6610/subcat2/SubSnap_%03d.hdf5'%(i+1),'r')['Subhalos'][...]
cen_find = h5py.File('/home/cossim/CosmicGrowth/6610/subcat2/SubSnap_%03d.hdf5'%(i+1),'r')['Membership/GroupedTrackIds'][...]
host_file = np.loadtxt('/home/yunzheng/mock/orphan/halosize/hosthalo_%s.dat'%(i))
orphantable = np.load('/home/yunzheng/mock/orphan_new/orphantabel_new/snapshot_%s/orphantable_savetest_%s.npy'%(x,i))
for q in range(orpnum[Nsnap]):
subhaloid = np.array(orphantable[q][0],dtype = int)
subhostid = sub_file[subhaloid]['HostHaloId']
if subhostid!=-1:
Msat = host_file[subhostid][1]
if Msat > 0:
hosthaloid_1 = sub_file_1[subhaloid]['HostHaloId']
cenid = cen_find[hosthaloid_1][0]
cenhostid = sub_file[cenid]['HostHaloId']
if cenhostid != -1:
Mcen = host_file[cenhostid][1]
mass_ratio = Mcen / Msat
t = constant * vel_ratio[Nsnap] * mass_ratio / (np.log(1+mass_ratio))
t_in = t_infall[Nsnap]
if t_in <= t:
j = j +1
m = m +1
fsnap_final[Nsnap] = fsnap_final[Nsnap] + 1
orphantable_final.append(orphantable[q])
orphan_final[Nsnap].append(orphantable[q])
np.save('/home/yunzheng/mock/orphan_new/final_selection_new/snapshot_%s/snap_%d.npy'%(x,i),orphan_final[Nsnap])
print("the orphan number :%d"%j)
print("For each snapshot :%d"%(fsnap_final[Nsnap]))
print("the total number :%d"%m)
print("the total number :%d"%(np.sum(fsnap_final)))
end = time.time()
print("time spent : %s minutes"%((end - start)/60))
np.save('/home/yunzheng/mock/orphan_new/final_selection_new/snapshot_%s/orphantable_final.npy'%(x),orphantable_final)
np.savetxt('/home/yunzheng/mock/orphan_new/final_selection_new/snapshot_%s/final_orpnum.txt'%(x),fsnap_final)
dat = np.array([84,85,86,87,88,89,91,92,94,95,97,99])
def multicore():
pool = mp.Pool(processes = 36)
res = pool.map(selection,dat)
pool.close()
pool.join()
print("bingo! yes!")
if __name__ == '__main__':
multicore()
| [
"yunzheng@sjtu.edu.cn"
] | yunzheng@sjtu.edu.cn |
6fb67619459eec708bf1dd4d7a8bb3558f39f0ba | 27a9efc061c6f4dea8e9f68c1d7117a3398ff144 | /python/basics/pipeg.py | 25af32aed90818cbccaebde1e970bd5869aa225a | [] | no_license | SpyderScrypt/Cheatsheets | 55cda3a33f196dc6701ff5648760a44f9ba78992 | ddc657ef0e829f978a65bf11347ae3e66a475e4a | refs/heads/master | 2023-01-14T08:53:51.389527 | 2019-06-25T09:48:58 | 2019-06-25T09:48:58 | 167,225,725 | 0 | 0 | null | 2023-01-04T01:02:58 | 2019-01-23T17:37:47 | HTML | UTF-8 | Python | false | false | 221 | py | # sudo python3.6 -m pip install camelcase
# pip version pip 8.1.1
# to see location of installed package eg - pip3 show beautifulsoup4
import camelcase
c = camelcase.CamelCase()
txt = "hello world"
print(c.hump(txt))
| [
"spiderscrypt@gmail.com"
] | spiderscrypt@gmail.com |
27666c2894fd7ea88d376a028d199719a60a7064 | c3c9f94ab2723a145ed6a6417d7fbeb035aa0ec0 | /patients/forms.py | 2e49b116d7c6586cf72a837ab0fb07d1c0e3cf89 | [] | no_license | Amisiyah/cabinetMedical | 53e75a3d8c35aedcc5c4fc5e5fba638aa28c8621 | 4b4fc837c2738332e854fafcb461298dcf4e09cf | refs/heads/master | 2020-04-12T12:08:30.633622 | 2018-12-19T19:41:57 | 2018-12-19T19:41:57 | 157,517,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django import forms
from .models import Patient
class PatientForm(forms.ModelForm):
class Meta:
fields = ['first_name', 'middle_name', 'last_name', 'email', 'pin', 'address', 'city', 'country']
model = Patient
| [
"bogdy.ponta@gmail.com"
] | bogdy.ponta@gmail.com |
896e5366094a1ea2ac97ff558135f0349a7a0671 | f0606525fe4c431fed42a7c6c06d6720a602cdd7 | /proyecto3/hw3_clustering.py | b79f6b0b21072440a2e7fba8882105fd64a39fbd | [] | no_license | yagocasasnovas/cursoML | c1567fbdedb61fe070e00a22d2c9c62e33678b14 | c0e382881612a5d9600082312885271a9e1a6b40 | refs/heads/master | 2021-08-22T08:41:30.584915 | 2017-11-29T19:36:00 | 2017-11-29T19:36:00 | 104,351,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,502 | py | #proyecto3
#5 clusters
#10 iterations
from __future__ import division
import numpy as np
import sys
import math
from random import randint
import copy
import csv
from scipy.stats import multivariate_normal
#import time
#start_time = time.time()
def prob(x, mean, std, size):
inversecov = np.linalg.inv(std)
toro = x - mean
ff = np.dot(inversecov,toro)
js = np.dot(toro,ff)
js = (-1) * js / 2
ee = np.exp(js)
determ = np.linalg.det(std)
#print determ
determ1 = np.power(determ,1/2)
#print determ1
determ2 = np.power(2 * np.pi,size/2)
determ3 = 1 / (determ1*determ2)
out = determ3 * ee
#print out
#raw_input()
return out
#e = np.exp(-(x-mean)*(x-mean)/(2*std*std))
#return (1 / (np.sqrt(2*np.pi) * std*std)) * e
K=5
it=10
centroids = []
centroids_set = set()
threshold = 0.000001
end = 0
X = np.genfromtxt(sys.argv[1], delimiter=",")
number_att = len(X[0])
instances = len(X)
nk = 0
while nk < 5:
l = np.random.randint(0,high=len(X)-1)
if l not in centroids_set:
centroids_set.add(l)
centroids.append(l)
nk = nk + 1
centroids_x = []
for c1 in centroids:
centroids_x.append(X[c1])
#centroids_EM = centroids_x[:]
for iter in range(it):
#Obtenemos C
C = {}
for idx, x in enumerate(X):
cbueno = -1
min = 1000000000000
for idy, c in enumerate(centroids_x):
dist = np.linalg.norm(x-c)
if min > dist:
min = dist
cbueno = idy
C[idx] = cbueno
#obtenemos mu
centroids_x_temp = centroids_x[:]
centroids_x = []
for idx, c in enumerate(centroids_x_temp):
sum_partial = np.zeros(number_att)
i_c = 0
for key, value in C.iteritems():
if value == idx:
sum_partial = sum_partial + X[key]
i_c = i_c + 1
sum_partial = sum_partial/i_c
centroids_x.append(sum_partial)
kks = 0
itt = iter + 1
namefile = "centroids-"+str(itt)+".csv"
with open(namefile, 'w') as csvfile:
for aa in centroids_x:
ee = 0
for idd in aa:
if ee != 0:
csvfile.write(',')
csvfile.write(str(idd))
ee = ee + 1
csvfile.write('\n')
##########EM GMM
##normalize data
varianzas = np.zeros(len(X[0]))
for i in range(len(X[0])):
suma = 0
for j in range(instances):
suma = suma + X[j][i]
media = suma / instances
suma2 = 0
for jj in range(instances):
suma2 = suma2 + (X[jj][i] - media)*(X[jj][i] - media)
hg = suma2/instances
varianza = np.power(hg,1/2)
varianzas[i] = varianza
###inicializar means: centroids_EM
centroids_set_EM = set()
nk = 0
while nk < 5:
l = np.random.randint(0,high=len(X)-1)
if l not in centroids_set_EM:
centroids_set_EM.add(l)
centroids.append(l)
nk = nk + 1
centroids_EM = []
for c1 in centroids_set_EM:
centroids_EM.append(X[c1])
### inicializar sigmas dxd donde d = numero de atributos
sigmas = []
factor = 1
iden = np.identity(number_att)
gh = iden*varianzas
for k in range(K):
sigmas.append(gh)
##distribution
distrib = np.zeros(K)
for k in range(K):
distrib[k] = (1/K)
for iteration in range(it):
#######################3###E Step
fi_x_vector = []
for i in range(instances):
fi_k_vector = np.zeros(K)
for k in range(K):
#pk = prob(X[i],centroids_EM[k],sigmas[k],number_att)
pk = multivariate_normal.pdf(X[i],centroids_EM[k],sigmas[k],allow_singular=True)
pkk = pk * distrib[k]
ss = 0
for k1 in range (K):
#print str(i) + ' ' + str(k) + ' ' + str(k1) + ' ' + str(len(centroids_EM)) + ' ' + str(len(sigmas))+' ' + str(len(distrib))
#print str(i) + ' ' + str(k) + ' ' + str(k1)
#p = prob(X[i],centroids_EM[k1],sigmas[k1],number_att)
p = multivariate_normal.pdf(X[i],centroids_EM[k1],sigmas[k1],allow_singular=True)
pp = p * distrib[k1]
ss = ss + pp
fi_k_vector[k] = pkk/ss
fi_x_vector.append(fi_k_vector)
#######################M Step
##########define nk
n_vector = np.zeros(K)
for k in range(K):
n_temp_k = 0
for i in range(instances):
n_temp_k = n_temp_k + fi_x_vector[i][k]
n_vector[k] = n_temp_k
#print n_vector
#raw_input()
#print np.linalg.norm(n_vector)
#update distrib
for k in range(K):
ty = n_vector[k]
ty = ty / instances
distrib[k]=ty
#update mean
cuscus = 0
for i in range(instances):
as1 = fi_x_vector[i][k]*X[i]
cuscus = cuscus + as1
centroids_EM[k] = cuscus/n_vector[k]
#update sigma
cuscus2 = 0
for i in range(instances):
ps = X[i] - centroids_EM[k]
sw = np.outer(ps,ps.T)
sw1 = fi_x_vector[i][k]*sw
cuscus2 = cuscus2 + sw1
sigmas[k] = cuscus2/n_vector[k]
kks = 0
itt1 = iteration + 1
namefile1 = "pi-"+str(itt1)+".csv"
with open(namefile1, 'w') as csvfile1:
for d in distrib:
csvfile1.write(str(d))
csvfile1.write('\n')
#print centroids_EM
#raw_input()
itt2 = iteration + 1
namefile2 = "mu-"+str(itt2)+".csv"
with open(namefile2, 'w') as csvfile2:
for c in centroids_EM:
ee = 0
for l in range(len(c)):
if ee != 0:
csvfile2.write(',')
csvfile2.write(str(c[l]))
ee = ee + 1
csvfile2.write('\n')
itt3 = iteration + 1
for k in range(K):
namefile3 = "Sigma-"+str(k+1)+"-"+str(itt3)+".csv"
with open(namefile3, 'w') as csvfile3:
for n in range(number_att):
ee = 0
for nn in range(number_att):
if ee != 0:
csvfile3.write(',')
csvfile3.write(str(sigmas[k][n][nn]))
ee = ee + 1
csvfile3.write('\n')
| [
"yago.casasnovas@gmail.com"
] | yago.casasnovas@gmail.com |
9301f373603392c31e4ef37ab57d6eace6eb163f | cf470f7d3fd0ea481970bcdedcd869258f692d05 | /aces_1.2/python/bin/create_aces_config | 198eabe513ced3b60c560b6793299b56d29d36e7 | [
"LicenseRef-scancode-unknown-license-reference",
"AMPAS"
] | permissive | colour-science/OpenColorIO-Configs | 3acef083127b698eb3252b45d724dfd4f5346c1a | b0a3ae218c24ed452e01ac1282d0b40e31dede6e | refs/heads/master | 2023-09-03T11:51:31.862794 | 2022-04-14T20:17:13 | 2022-04-14T20:17:13 | 54,505,320 | 619 | 440 | NOASSERTION | 2022-04-14T20:17:14 | 2016-03-22T20:06:48 | Roff | UTF-8 | Python | false | false | 802 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Creates the *ACES* configuration.
"""
from __future__ import division
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from aces_ocio.generate_config import main
__author__ = (
'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley, '
'Joseph Goldstone')
__copyright__ = (
'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = 'acessupport@oscars.org'
__status__ = 'Production'
__all__ = []
if __name__ == '__main__':
main()
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com | |
a973b42be8042a611914d525d8716e478b9d9886 | 6ae42cd7589829a5a6b130392b650c9f9e96bfcf | /MCMC/mcmc | 91c5f47ad001eba780c523aae4705be4b59924aa | [] | no_license | kearnsw/MachineLearning | c77afecea67da39d3c37116436cb93d14a577b9c | 5560c5b5e960cf1f21a6295572abfe66c3586d04 | refs/heads/master | 2021-01-17T20:00:14.939749 | 2017-08-04T20:59:04 | 2017-08-04T20:59:04 | 53,383,293 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,646 | #!/usr/bin/env python
import argparse
import numpy as np
import math
from decimal import Decimal
def parse_args():
cli = argparse.ArgumentParser()
cli.add_argument("--mu", default=None, type=Decimal, help="initial guess at mean of distribution")
cli.add_argument("--mu_min", type=Decimal, help="mean minimum value to be explored")
cli.add_argument("--mu_max", type=Decimal, help="mean maximum value to be explored")
cli.add_argument("--sigma", default=None, type=Decimal, help="initial guess at std. dev. of distribtuion")
cli.add_argument("--sigma_min", type=Decimal, help="std. dev. minimum value to be explored")
cli.add_argument("--sigma_max", type=Decimal, help="std. dev. maximum value to be explored")
cli.add_argument("--samples", type=int, help="number of samples taken")
cli.add_argument("--stepsize", type=Decimal, help="step size of metropolis algorithm")
return cli.parse_args()
def gaussian(x, mu, sig):
return 1/np.sqrt(2*Decimal(math.pi)*sig**2) * np.exp(-(x - mu)**2 / (2 * sig**2))
def pdf(data, mu, sigma):
total = 1
for point in data:
total *= gaussian(point, mu, sigma)
return total
if __name__ == "__main__":
args = parse_args()
with open("data/data.out", "r") as f:
data = []
for line in f:
data.append(Decimal(line.strip()))
mu = Decimal(np.random.random()) * (args.mu_max - args.mu_min) + args.mu_min
sigma = Decimal(np.random.random()) * (args.sigma_max - args.sigma_min) + args.sigma_min
if args.mu:
mu = args.mu
if args.sigma:
sigma = args.sigma
MAX_ITERATION = args.samples
print("{0},{1},{2}".format("mu", "sigma", "posterior"))
for i in range(MAX_ITERATION):
s1 = Decimal(np.random.random()) * args.stepsize * 2 - args.stepsize
mu_new = mu + s1
s2 = Decimal(np.random.random()) * args.stepsize * 2 - args.stepsize
sigma_new = sigma + s2
if mu_new > args.mu_max or mu_new < args.mu_min:
print(mu, sigma, y)
continue
if sigma_new > args.sigma_max or sigma_new < args.sigma_min:
print(mu, sigma, y)
continue
# Calculate posteriors
y = pdf(data, mu, sigma)
y_new = pdf(data, mu_new, sigma_new)
# Metropolis Algorithm
if y_new > y:
mu = mu_new
sigma = sigma_new
y = y_new
else:
r = np.random.random()
if y_new/y > r:
mu = mu_new
sigma = sigma_new
y = y_new
print("{0},{1},{2}".format(mu, sigma, y))
| [
"kearns391@gmail.com"
] | kearns391@gmail.com | |
4b67d3a8af902f3a7d173e51e4a21538664b3e5c | e173b0905c22dd874817aaf020d96d4aa3d394b2 | /web_flask/8-cities_by_states.py | bff886a79778e0a6d9196798cf5629628f812d0b | [] | no_license | HermesBoots/AirBnB_clone_v2 | c771caf7c38e778e2cfaed17a47d88decbe5b0c5 | 9bedb1610f4618711bdc07568e323b389140014b | refs/heads/master | 2020-07-06T19:23:50.688273 | 2019-09-03T22:21:20 | 2019-09-03T22:21:20 | 203,115,957 | 0 | 0 | null | 2019-08-19T06:57:01 | 2019-08-19T06:56:59 | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/python3
"""A simple Flask server using our real HBNB data"""
from flask import Flask, render_template
import models
site = Flask(__name__)
site.url_map.strict_slashes = False
@site.teardown_appcontext
def closeStorageAfterRequest(error):
"""Close and reload the storage engine between requests"""
models.storage.close()
@site.route('/cities_by_states')
def page_showStatesAndCities():
"""List all the stored states and the cities within them"""
states = models.storage.all('State').values()
return render_template('8-cities_by_states.html', states=states)
if __name__ == '__main__':
site.run(host='0.0.0.0', port=5000)
| [
"701@holbertonschool.com"
] | 701@holbertonschool.com |
d05ce141ecc9bf14ab3e7757f48348f9ccdd9d61 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/171/61531/submittedfiles/testes.py | 0394990bdadaa06eebff3565e0697e79fea81b66 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # -*- coding: utf-8 -*-
import math
#COMECE AQUI ABAIXO
def media(a):
    """Return the arithmetic mean of the numbers in ``a``.

    Like the original manual-loop version, raises ZeroDivisionError when
    ``a`` is empty.
    """
    # sum()/len() replaces the index-based accumulation loop.
    return sum(a) / len(a)
# Read ``n`` numbers from the user, then report first/last elements and mean.
n = int(input('digite numero de elementos da lista:'))
a = []
for i in range(n):
    numero = float(input('digite numero รก ser inserido na lista:'))
    a.append(numero)
print('%.3f' % a[0])
print('%.3f' % a[len(a) - 1])
# Fixed: the original used a Python 2 print statement (`print'%.3f'%...`),
# which is a SyntaxError under Python 3.
print('%.3f' % (media(a)))
print(a)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
951a4225099034a69b144669861e4bec80886d22 | 953bca4a31fab48d932917418c4c3739c9a6f002 | /Scripts/django-admin.py | 782fb780d8635a8d880b211bf5bbad24ba1f9493 | [] | no_license | loganAlambke/League1 | 75c03066152da9c21006d54d752cbcdb88299c0d | 519e70f746393665768666fd945b21290a155f76 | refs/heads/master | 2022-12-16T05:29:53.080044 | 2018-04-28T18:35:15 | 2018-04-28T18:35:15 | 131,349,512 | 0 | 0 | null | 2022-12-08T00:57:03 | 2018-04-27T22:13:37 | Tcl | UTF-8 | Python | false | false | 161 | py | #!c:\users\loganl~1\enviro~1\league\scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Console-script stub (presumably generated at install time into a
    # virtualenv — confirm): hand control to Django's management runner.
    management.execute_from_command_line()
| [
"loganalambke@gmail.com"
] | loganalambke@gmail.com |
e58b36b05c142642d3001d70c865a8a112804449 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/156.py | aafe0dea8075d2124fd3dc79cbb842ba780bd38f | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,015 | py | import sys
class Problem():
    # Project Euler 156: with f(n, d) = number of occurrences of digit d in
    # the decimal representations of 1..n, find all fixed points f(n, d) = n
    # for each digit d in 1..9 and sum them.

    def __init__(self):
        # Fixed points collected for the digit currently being searched.
        self.found = None

    def solve(self):
        """Print s(d) for each digit 1..9 and the grand total."""
        count = 0
        for digit in range(1, 10):
            solution_sum = self.s(digit)
            print(digit, solution_sum)
            count += solution_sum
        print(count)

    def s(self, digit):
        """Return the sum of all n in [1, 10**11) with f(n, digit) == n."""
        self.found = []
        self.binary_search(1, 10**11, digit)
        return sum(self.found)

    def f(self, n, digit):
        """Count occurrences of ``digit`` among the numbers 1..n.

        Processes one decimal place per iteration (classic digit-counting
        formula) rather than enumerating every number.
        """
        count = 0
        factor = 1
        while n // factor != 0:
            lower_number = n - (n // factor) * factor  # digits below current place
            curr_number = (n // factor) % 10           # digit at current place
            higher_number = n // (factor * 10)         # digits above current place
            if curr_number < digit:
                count += higher_number * factor
            elif curr_number == digit:
                count += higher_number * factor + lower_number + 1
            else:
                count += (higher_number + 1) * factor
            factor *= 10
        return count

    def binary_search(self, lower, upper, digit):
        """Recursively collect fixed points of f within (lower, upper).

        Relies on f being non-decreasing: a half-interval can contain a
        solution of f(n) == n only when the value range of f over that
        half overlaps the interval itself.
        """
        if lower + 1 == upper:
            if self.f(lower, digit) == lower:
                self.found.append(lower)
            return
        middle = (lower + upper) // 2
        lower_value = self.f(lower, digit)
        upper_value = self.f(upper, digit)
        middle_value = self.f(middle, digit)
        # Recurse only into halves where a crossing f(n) == n is possible.
        if middle_value >= lower and middle >= lower_value:
            self.binary_search(lower, middle, digit)
        if upper_value >= middle and upper >= middle_value:
            self.binary_search(middle, upper, digit)

    def f_naive(self, n, digit):
        """Brute-force reference implementation of f (for validation)."""
        return sum([self.count_naive(i, digit) for i in range(1, n+1)])

    def count_naive(self, n, digit):
        """Count occurrences of ``digit`` in the decimal digits of n."""
        count = 0
        while n > 0:
            n, r = divmod(n, 10)
            if r == digit:
                count += 1
        return count
def main():
    """Entry point: build a Problem instance and run its solver."""
    Problem().solve()


if __name__ == '__main__':
    # main() returns None, so sys.exit reports success (exit status 0).
    sys.exit(main())
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
a297e90f9d69e0e6b7d21b4e9b3f3fc54b33f6ba | a0a19aa93b76eb81c3969b5390d3d05c8c06a042 | /test_files/testss/api_test.py | 36496c3804ff0f0613fd849bc83d2a2663f40e1b | [] | no_license | DavidRoldan523/inbound_marketing_clean | 0d36db0e708ee233d0c100d049ac7a88090a5f83 | 543c21895d4e046cb19d8a5503049a452a74beee | refs/heads/master | 2020-06-10T23:50:24.887764 | 2019-06-28T16:52:41 | 2019-06-28T16:52:41 | 193,794,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import requests
if __name__ == '__main__':
    # Ad-hoc smoke test of the WhoisXML email-verification API: verify a
    # handful of addresses and print each one with its SMTP check result.
    # NOTE(review): the API key is hard-coded in the URL below — move it to
    # an environment variable before sharing this script.
    list_emails_test = ['cjgalvisc@unal.edu.co',
                        'millenn96@gmail.com',
                        'hdhdhd@hjh.com',
                        'millenxxn96@gmail.com',
                        'cristian@yopmail.com',
                        'xzkgkfkfkrrkr@yopmail.com']
    for email in list_emails_test:
        # One GET per address; the API responds with a JSON verdict.
        url = f'https://emailverification.whoisxmlapi.com/api/v1' \
              f'?apiKey=at_qutWRadV4EGYuHotdJgEt9keSKIic' \
              f'&emailAddress={email}'
        response = requests.get(url)
        print(f"{email} || {response.json()['smtpCheck']}")
"cjgalvisc@unal.edu.co"
] | cjgalvisc@unal.edu.co |
e393b34c2c3d876a6b586e70e401e7d575d74237 | 608c5693def02fda0c5319bf044cfc6883e1cce6 | /islandsNumber200.py | 8770218c77f2d9d3c1a4b48f0032b74c78528b01 | [] | no_license | chandini21/DFS-2 | cf4903c992f220336a8b7997705a4149f5e2cc04 | 9c8d84e427a7af76f23e5144bf92739fd88b330f | refs/heads/master | 2023-04-12T22:44:28.712831 | 2021-04-19T22:00:32 | 2021-04-19T22:00:32 | 359,203,017 | 0 | 0 | null | 2021-04-18T16:55:41 | 2021-04-18T16:55:40 | null | UTF-8 | Python | false | false | 1,340 | py | """
Approach - BFS
TC - O(m*n)
SC - O(m*n)
"""
class Solution:
    def numIslands(self, grid: List[List[str]]) -> int:
        """Count connected groups of '1' cells (4-directional) via BFS.

        Mutates ``grid`` in place: visited land cells are flipped to '0'.
        Time O(m*n), space O(m*n).
        """
        if not grid or not grid[0]:
            return 0
        # deque gives O(1) popleft; the original list.pop(0) was O(n).
        from collections import deque
        rows, cols = len(grid), len(grid[0])
        count = 0
        queue = deque()
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == '1':
                    # New island found: flood-fill it so it is counted once.
                    count += 1
                    grid[r][c] = '0'
                    queue.append((r, c))
                    while queue:
                        cr, cc = queue.popleft()
                        for dr, dc in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                            nr, nc = cr + dr, cc + dc
                            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == '1':
                                grid[nr][nc] = '0'
                                queue.append((nr, nc))
        return count
| [
"chandinisri.805@gmail.com"
] | chandinisri.805@gmail.com |
b34734bccd0addbe7a3f95e5866fe250ba44c343 | e6ebd1f9e3968f6ed613e9f35e46716115e6e9c3 | /chapter4/demo2.py | 9d025bcb4070abbf880ddf67f5d49444fcbfdbdb | [] | no_license | huwanping001/Python | 897046d3d6d1b420befeefcaa2b9544efa7d1881 | 3c76278f7a9b216b28b8880e0108af3c550b9372 | refs/heads/main | 2023-08-21T00:45:17.991833 | 2021-10-18T13:47:52 | 2021-10-18T13:47:52 | 409,586,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # ๅญฆๆ ก๏ผๅๅท่ฝปๅๅทฅๅคงๅญฆ
# ๅญฆ้ข๏ผ่ชไฟกๅญฆ้ข
# ๅญฆ็๏ผ่กไธๅนณ
# ๅผๅๆถ้ด๏ผ2021/9/18 9:54
#ๆต่ฏๅฏน่ฑก็boolๅผ
print(bool(False)) #False
print(bool(0)) #False
print(bool(0.0)) #False
print(bool(None)) #False
print(bool('')) #False
print(bool("")) #False
print(bool(list())) #็ฉบๅ่กจ False
print(bool([])) #็ฉบๅ่กจ False
print(bool(())) #็ฉบๅ
็ป False
print(bool(tuple())) #็ฉบๅ
็ป False
print(bool({})) #็ฉบๅญๅ
ธ False
print(bool(dict())) #็ฉบๅญๅ
ธFalse
print(bool(set())) #็ฉบ้ๅ False
print('-----------------ๅ
ถไปๅฏน่ฑก็boolๅผๅไธบTrue------------------')
print(bool(18))
print(bool(True))
print(bool('xiaohu')) | [
"noreply@github.com"
] | noreply@github.com |
3df69c8078977d9b51a98b936360a4cf6bcf6b89 | 1260ce7869ce32d6b434afbf273273b7b1ebea2d | /lorentz_equivariant_gnn/architectures/EquivariantGNN/egnn_base.py | ddcd258aec9bfcb6cb6915b699a963195d25437c | [] | no_license | savvy379/Lorentz-Equivariant-GNN | b3b30e964cfa9af39adcb4e8b73bc78b4f8b7b5e | 3d1c74081bdd43387a7c530bce73580db379d22d | refs/heads/master | 2023-08-01T06:43:13.229014 | 2021-09-22T18:35:15 | 2021-09-22T18:35:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,499 | py | import sys, os
import logging
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.data import DataLoader
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
from .utils import load_datasets
class EGNNBase(LightningModule):
    # Base LightningModule for Equivariant GNN experiments: dataset loading,
    # dataloaders, optimiser/scheduler setup, metric computation and LR
    # warm-up.  Subclasses are expected to implement ``forward``.

    def __init__(self, hparams):
        super().__init__()
        """
        Initialise the Lightning Module that can scan over different Equivariant GNN training regimes
        """

        # Assign hyperparameters
        self.save_hyperparameters(hparams)

    def setup(self, stage):
        """Load the train/val datasets from ``input_dir`` per ``data_split``."""
        # Handle any subset of [train, val, test] data split, assuming that ordering
        self.trainset, self.valset = load_datasets(self.hparams["input_dir"], self.hparams["data_split"])

    def train_dataloader(self):
        """Return a shuffled DataLoader over the training set, or None."""
        if self.trainset is not None:
            return DataLoader(self.trainset, batch_size=self.hparams["train_batch"], num_workers=1, shuffle=True)
        else:
            return None

    def val_dataloader(self):
        """Return a DataLoader over the validation set, or None."""
        if self.valset is not None:
            return DataLoader(self.valset, batch_size=self.hparams["val_batch"], num_workers=1)
        else:
            return None

    def test_dataloader(self):
        """Return a DataLoader over the test set, or None.

        NOTE(review): ``self.testset`` is never assigned in ``setup`` above,
        so calling this raises AttributeError — confirm intended usage.
        """
        if self.testset is not None:
            return DataLoader(self.testset, batch_size=1, num_workers=1)
        else:
            return None

    def configure_optimizers(self):
        """AdamW optimiser plus an epoch-wise StepLR schedule from hparams."""
        optimizer = [
            torch.optim.AdamW(
                self.parameters(),
                lr=(self.hparams["lr"]),
                betas=(0.9, 0.999),
                eps=1e-08,
                amsgrad=True,
            )
        ]
        scheduler = [
            {
                "scheduler": torch.optim.lr_scheduler.StepLR(
                    optimizer[0],
                    step_size=self.hparams["patience"],
                    gamma=self.hparams["factor"],
                ),
                "interval": "epoch",
                "frequency": 1,
            }
        ]
        return optimizer, scheduler

    def get_metrics(self, batch, output):
        """Compute (sigmoid prediction, accuracy, AUC, FPR at target TPR)."""
        prediction = torch.sigmoid(output)
        tp = (prediction.round() == batch.y).sum().item()
        acc = tp / len(batch.y)

        try:
            auc = roc_auc_score(batch.y.bool().cpu().detach(), prediction.cpu().detach())
        except:
            # NOTE(review): bare except hides unrelated failures; narrow it
            # (roc_auc_score raises ValueError with single-class batches).
            auc = 0
        fpr, tpr, _ = roc_curve(batch.y.bool().cpu().detach(), prediction.cpu().detach())

        # Calculate which threshold gives the best signal goal
        signal_goal_idx = abs(tpr - self.hparams["signal_goal"]).argmin()

        eps = fpr[signal_goal_idx]

        return prediction, acc, auc, eps

    def training_step(self, batch, batch_idx):
        """One training step: BCE-with-logits loss against ``batch.y``."""
        output = self(batch).squeeze(-1)
        loss = F.binary_cross_entropy_with_logits(output, batch.y.float())

        prediction, acc, auc, inv_eps = self.get_metrics(batch, output)

        self.log_dict({"train_loss": loss, "train_acc": acc}, on_step=False, on_epoch=True)

        return loss

    def validation_step(self, batch, batch_idx):
        """One validation step: log loss/metrics, return them for epoch end."""
        output = self(batch).squeeze(-1)
        loss = F.binary_cross_entropy_with_logits(output, batch.y.float())

        prediction, acc, auc, eps = self.get_metrics(batch, output)

        current_lr = self.optimizers().param_groups[0]["lr"]
        self.log_dict({"val_loss": loss, "acc": acc, "auc": auc, "current_lr": current_lr}, on_step=False, on_epoch=True)

        return {
            "loss": loss,
            "preds": prediction,
            "acc": acc,
            "auc": auc,
            "eps": eps
        }

    def validation_epoch_end(self, step_outputs):
        """Log the mean inverse background efficiency, when defined."""
        mean_eps = np.mean([output["eps"] for output in step_outputs])
        if mean_eps != 0:
            self.log_dict({"inv_eps": 1/mean_eps})

    def optimizer_step(
        self,
        epoch,
        batch_idx,
        optimizer,
        optimizer_idx,
        optimizer_closure=None,
        on_tpu=False,
        using_native_amp=False,
        using_lbfgs=False,
    ):
        """Apply linear LR warm-up for the first ``warmup`` steps, then step."""
        # warm up lr
        if (self.hparams["warmup"] is not None) and (
            self.trainer.global_step < self.hparams["warmup"]
        ):
            lr_scale = min(
                1.0, float(self.trainer.global_step + 1) / self.hparams["warmup"]
            )
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * self.hparams["lr"]

        # update params
        optimizer.step(closure=optimizer_closure)
        optimizer.zero_grad()
def compute_radials(edge_index, x):
    """
    Compute the squared Minkowski interval between connected node embeddings.

    :param edge_index: Pair (row, col) of index tensors describing the edges
    :param x: Node coordinate embeddings with the time component first
    :return: Squared Minkowski distances (shape [E, 1]) and the raw
        coordinate differences x_i - x_j
    """
    source, target = edge_index
    differences = x[source] - x[target]
    squared = differences * differences
    # Metric signature eta = diag(-1, 1, 1, 1): negate the time component.
    squared[:, 0] = -squared[:, 0]
    radial = squared.sum(dim=1, keepdim=True)
    return radial, differences
| [
"murnanedaniel@hotmail.com"
] | murnanedaniel@hotmail.com |
9cb0e2bb1bc7d746dadb931b170dd4cf089919b3 | 31ac7e8edbf74e795a0e4cb5c27b354ff730703d | /cleaners/stock_data_cleaner_remove_invalid_rows.py | d920d7276445b26a41327ad0d4eee8dcf5436953 | [] | no_license | webclinic017/operation_get_rich | 9cbd9d2432ac6e6f3b40242b8fb76881c8b86ac4 | d6aa1bd5a8399c0eee0ede2265f2eb4a43b09331 | refs/heads/master | 2023-06-01T09:25:15.619531 | 2021-06-24T18:33:12 | 2021-06-24T18:33:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | """
Some rows has a row like
"open close low high volume"
This cleaner cleans it
"""
import pandas
from pandas import DataFrame
FILENAME = '../stock_price_small.csv'
stock_price_df = pandas.read_csv(FILENAME) # type: DataFrame
stock_price_df.drop(
stock_price_df[stock_price_df['open'] == 'open'].index,
inplace=True
)
stock_price_df.to_csv('../stock_price_small.csv')
| [
"fkamili@thezebra.com"
] | fkamili@thezebra.com |
346a548b4981de4e2a5053a36edeae361dac28e9 | 345d9371a3e8703fe49214484a44f3bf3da9709f | /virtual/bin/easy_install-3.7 | 3b3f1b0d5584c943a814832f6aee3029301e7b43 | [
"MIT"
] | permissive | Atsalibram/Instagram | 456c69d699ae5eb145aa0123a1baafa13bb4b121 | d01ea60341686fb6ff2f1e6213ce255b6f03c101 | refs/heads/master | 2023-03-29T10:08:50.422674 | 2021-04-03T15:10:19 | 2021-04-03T15:10:19 | 351,752,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | 7 | #!/home/moringaaccess/Desktop/Django/Instagram/virtual/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip any "-script.py(w)"/".exe" suffix (added by the Windows script
    # launcher) from argv[0], then run setuptools' easy_install entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"atsalibaram@gmail.com"
] | atsalibaram@gmail.com |
300dc5d3cf9ec6b7d67dca8ceb272fa0ad0e6d80 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_rule_py3.py | 0f8b6dc1a56b4743ed087bfac58ebfdafeb3318d | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 7,668 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class SecurityRule(SubResource):
    # NOTE: autogenerated Azure SDK model (see file header) — keep structure
    # in sync with the generator; do not hand-edit field mappings casually.
    """Network security rule.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Required. Network protocol this rule applies to. Possible
     values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
     '*'
    :type protocol: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleProtocol
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterix '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterix '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterix '*' can
     also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param source_address_prefixes: The CIDR or source IP ranges.
    :type source_address_prefixes: list[str]
    :param source_application_security_groups: The application security group
     specified as source.
    :type source_application_security_groups:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
    :param destination_address_prefix: The destination address prefix. CIDR or
     destination IP range. Asterix '*' can also be used to match all source
     IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
     'Internet' can also be used.
    :type destination_address_prefix: str
    :param destination_address_prefixes: The destination address prefixes.
     CIDR or destination IP ranges.
    :type destination_address_prefixes: list[str]
    :param destination_application_security_groups: The application security
     group specified as destination.
    :type destination_application_security_groups:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationSecurityGroup]
    :param source_port_ranges: The source port ranges.
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges.
    :type destination_port_ranges: list[str]
    :param access: Required. The network traffic is allowed or denied.
     Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
     'Deny'
    :type access: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleAccess
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: Required. The direction of the rule. The direction
     specifies if rule will be evaluated on incoming or outcoming traffic.
     Possible values are: 'Inbound' and 'Outbound'. Possible values include:
     'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleDirection
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Parameters the service requires on every request.
    _validation = {
        'protocol': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }

    # Maps Python attribute names to serialized JSON paths and wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
        'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
        'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, protocol, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, source_address_prefix: str=None, source_address_prefixes=None, source_application_security_groups=None, destination_address_prefix: str=None, destination_address_prefixes=None, destination_application_security_groups=None, source_port_ranges=None, destination_port_ranges=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
        # ``id`` is consumed by the SubResource base; remaining keyword-only
        # arguments map one-to-one onto the attributes declared above.
        super(SecurityRule, self).__init__(id=id, **kwargs)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.source_address_prefixes = source_address_prefixes
        self.source_application_security_groups = source_application_security_groups
        self.destination_address_prefix = destination_address_prefix
        self.destination_address_prefixes = destination_address_prefixes
        self.destination_application_security_groups = destination_application_security_groups
        self.source_port_ranges = source_port_ranges
        self.destination_port_ranges = destination_port_ranges
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
| [
"noreply@github.com"
] | noreply@github.com |
4d30fa003a27138c61cb2b4d5177b1b5ec2f06c1 | f51430dab7b2189f05b1c884bfdfe3431b83ee54 | /test.py | 3f7eab105d6defde841af1a4d1f04c6f2bba2864 | [] | no_license | tsnaomi/echo-server | 6ce3bb5fadeeb3269208c5bd97c82f5411d898be | c9e071fc429685a037bed8167c7abbeca0a12583 | refs/heads/master | 2021-05-30T09:57:22.598162 | 2014-03-30T22:35:56 | 2014-03-30T22:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | #!/usr/bin/env python
import unittest
import socket
import echo_client
class TestEchoChamber(unittest.TestCase):
    """End-to-end checks that the echo client returns exactly what it sent."""

    def test_short(self):
        # A message shorter than one receive buffer.
        message = "darling"
        self.assertEqual(message, echo_client.echo_client(message))

    def test_long(self):
        # A message longer than one receive buffer.
        message = "3.1415926535897932384626433832795028841971693993"
        self.assertEqual(message, echo_client.echo_client(message))

    def test_exact(self):
        # A 32-character message (presumably matching the buffer size exactly
        # — confirm against echo_client's implementation).
        message = "abcdefghijklmnopqrstuvwxyz123456"
        self.assertEqual(message, echo_client.echo_client(message))


if __name__ == '__main__':
    unittest.main()
| [
"tsnaomi@users.noreply.github.com"
] | tsnaomi@users.noreply.github.com |
8ddf94de860845ad1dc832ae0a985b79738970d0 | 1acfd11ff9b30fb9392bac1e6afe9fc2b715b21e | /gst.py | 9b5a74a347cc1d7bde5faba48235fdf566ace141 | [
"BSD-3-Clause"
] | permissive | Volvagia356/mobile-gst | e06d82334df4d43da71e8b93ca677fe71c1ca2f4 | 0887c2f412a3ab0a92ed04ba91645df4383e739b | refs/heads/master | 2021-01-23T11:47:49.981069 | 2016-03-01T02:03:26 | 2016-03-01T02:03:26 | 34,137,357 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,180 | py | import requests
from time import time
from bs4 import BeautifulSoup
class FWDC(requests.Session):
    """requests.Session that injects FAST WebDynpro client state on each call."""

    def __init__(self, *args, **kwargs):
        # Client-side state the FAST framework expects on every request.
        self.fwdc_data = {
            'FAST_CLIENT_WINDOW__': "FWDC.WND-0000-0000-0000",
            'FAST_CLIENT_AJAX_ID__': 0,
        }
        super(FWDC, self).__init__(*args, **kwargs)

    def before_request(self):
        # Stamp the request with the current time (ms) and bump the AJAX id.
        self.fwdc_data['FAST_CLIENT_WHEN__'] = str(int(time() * 1000))
        self.fwdc_data['FAST_CLIENT_AJAX_ID__'] += 1

    def after_request(self, response):
        # Carry the server-reported version markers into the next request.
        try:
            self.fwdc_data['FAST_VERLAST__'] = response.headers['Fast-Ver-Last']
            self.fwdc_data['FAST_VERLAST_SOURCE__'] = response.headers['Fast-Ver-Source']
        except KeyError:
            # Best-effort: not every response carries these headers.
            pass

    def get(self, *args, **kwargs):
        self.before_request()
        params = kwargs.setdefault("params", {})
        params.update(self.fwdc_data)
        response = super(FWDC, self).get(*args, **kwargs)
        self.after_request(response)
        return response

    def post(self, *args, **kwargs):
        self.before_request()
        data = kwargs.setdefault("data", {})
        data.update(self.fwdc_data)
        response = super(FWDC, self).post(*args, **kwargs)
        self.after_request(response)
        return response
class GST():
    """Screen-scraping driver for the customs GST status-lookup flow."""

    def __init__(self):
        self.fwdc = FWDC()

    def load_front_page(self):
        """Open the TAP landing page (two-step load, mirroring the browser)."""
        self.fwdc.get("https://gst.customs.gov.my/TAP/_/")
        self.fwdc.get("https://gst.customs.gov.my/TAP/_/", params={'Load': "1"})

    def click_lookup_gst_status(self):
        """Fire the click event that opens the 'Lookup GST Status' form."""
        payload = {
            'DOC_MODAL_ID__': "0",
            'EVENT__': "b-m",
            'TYPE__': "0",
            'CLOSECONFIRMED__': "false",
        }
        self.fwdc.post("https://gst.customs.gov.my/TAP/_/EventOccurred", data=payload)

    def select_radio_button(self, button_id):
        """Tick the form radio button identified by *button_id*."""
        self.fwdc.post(
            "https://gst.customs.gov.my/TAP/_/Recalc",
            data={button_id: "true", 'DOC_MODAL_ID__': "0"},
        )

    def enter_text_field(self, field_id, text):
        """Type *text* into *field_id*; return the server's JSON response."""
        response = self.fwdc.post(
            "https://gst.customs.gov.my/TAP/_/Recalc",
            data={field_id: text, 'DOC_MODAL_ID__': "0"},
        )
        response.encoding = "utf-8-sig"
        return response.json()

    # Radio-button element ids on the lookup form.
    def select_gst_num_radio(self):
        self.select_radio_button("e-4")

    def select_business_num_radio(self):
        self.select_radio_button("e-7")

    def select_business_name_radio(self):
        self.select_radio_button("e-9")

    # Text-field element ids on the lookup form.
    def enter_gst_num(self, gst_num):
        return self.enter_text_field("e-6", gst_num)

    def enter_business_num(self, business_num):
        return self.enter_text_field("e-8", business_num)

    def enter_business_name(self, business_name):
        return self.enter_text_field("e-a", business_name)
# Domain error raised when a lookup cannot produce a results table.
class GSTError(Exception): pass
def find_field_update(fwdc_response, field):
    """Return the first FieldUpdates entry for *field*, or None if absent."""
    updates = fwdc_response['Updates']['FieldUpdates']
    return next((entry for entry in updates if entry['field'] == field), None)
def is_field_visible(fwdc_response, field):
    """Return the 'visible' flag of *field*'s update, or False when absent."""
    update = find_field_update(fwdc_response, field)
    return update.get("visible", False) if update else False
def parse_business_table(table_html):
    """Convert the HTML results table into a list of per-business dicts."""
    columns = ["gst_num", "legal_name", "trading_name", "date", "status"]
    soup = BeautifulSoup(table_html)
    records = []
    # One <tr class="DataRow"> per registrant; one <td> per column.
    for row in soup.tbody.find_all("tr", class_="DataRow", recursive=False):
        values = [cell.get_text() for cell in row.find_all("td", recursive=False)]
        records.append(dict(zip(columns, values)))
    return records
def get_table_from_response(fwdc_response):
    """Extract the parsed result table, raising GSTError on failure states."""
    field_update = find_field_update(fwdc_response, "e-h")
    if not field_update:
        # No table rendered: map whichever error widget is visible to a message.
        if is_field_visible(fwdc_response, "e-k"):
            raise GSTError("No Registrants Found!")
        if is_field_visible(fwdc_response, "e-p"):
            raise GSTError("Over 100 results found. Please narrow search terms!")
        if is_field_visible(fwdc_response, "e-s"):
            raise GSTError("Server under maintenance. Please check back later!")
        raise GSTError("Unknown error occured!")
    return parse_business_table(field_update['value'])
def prepare_GST():
    """Create a GST session already navigated to the status-lookup form."""
    session = GST()
    session.load_front_page()
    session.click_lookup_gst_status()
    return session
def search_gst_num(gst_num):
    """Look up registrants by GST registration number."""
    session = prepare_GST()
    session.select_gst_num_radio()
    return get_table_from_response(session.enter_gst_num(gst_num))
def search_business_num(business_num):
    """Look up registrants by business registration number."""
    session = prepare_GST()
    session.select_business_num_radio()
    return get_table_from_response(session.enter_business_num(business_num))
def search_business_name(business_name):
    """Look up registrants by (partial) business name."""
    session = prepare_GST()
    session.select_business_name_radio()
    return get_table_from_response(session.enter_business_name(business_name))
| [
"volvagia356@gmail.com"
] | volvagia356@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.