blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6bae26654c6acef28516545d310b66c3a0191796 | b71fbf427779ec10ab88a9b6d16f8797e24f23c9 | /my_env/bin/epylint | 2d22a7c468ec7552fefef37ee703dc59a0e47722 | [] | no_license | Promphut/mydjango | a01b2d9f7814f697a37b3a7daf9b3cfa3ce9a7b1 | 7aecac416dc8c9af4eb3ff9027f1c451b7c25966 | refs/heads/master | 2020-04-23T04:45:35.631874 | 2019-02-15T19:38:16 | 2019-02-15T19:38:16 | 170,918,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/petch/Desktop/project/django-test/my_env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
# Console entry point generated by setuptools: strip the "-script.pyw"/".exe"
# suffix from argv[0] so pylint reports a clean program name, then run
# epylint and propagate its exit status to the shell.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
| [
"phiphut.ch@gmail.com"
] | phiphut.ch@gmail.com | |
636f0fdca26af0cf9e7468e5467418ca250a8028 | 42f13ecc7c3ca22bab065220c7569d1b9b4f2163 | /face_detection.py | a1611179eb19594bd44fe5d605addffd365f47e2 | [] | no_license | akashshah03/Face-Detection-using-python | bc35b9413a17ffe0d82ff4862f3a01a54284aa91 | 48ad8e766ba3ac43e7cc91ff1c0366f8d0ead164 | refs/heads/master | 2022-09-09T19:34:45.519757 | 2020-05-24T07:17:54 | 2020-05-24T07:17:54 | 266,486,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import cv2
# Load the cascade.
# NOTE(review): assumes haarcascade_frontalface_default.xml is in the
# current working directory -- confirm deployment layout.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(0)
# Main loop: grab a frame, detect faces, draw boxes, show the result.
while True:
    # Read the frame
    _, img = cap.read()
    # Convert to grayscale (the cascade operates on single-channel images)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the faces (scaleFactor=1.1, minNeighbors=4)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Draw a blue rectangle around each detected face
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    # Display
    cv2.imshow('img', img)
    # Stop if escape key (27) is pressed
    k = cv2.waitKey(30) & 0xff
    if k==27:
        break
# Release the VideoCapture object and close the display window
cap.release()
cv2.destroyAllWindows()
| [
"akashshah.dsc@gamil.com"
] | akashshah.dsc@gamil.com |
82ab554569c52ce1941616bf24b0ee3e3cbbbc79 | 3fa0779fe2a16c7bd5786fa33a7f553ebb2c92d7 | /venv/bin/django-admin | e9a9621eef1a6ef34fe26183042426a58a730f9d | [] | no_license | RrodriguezM/djangoReact | 30465097b89bac2ce50f821db0e81a4085e213f0 | 72d607911301c84555d74d8702ecf38ddae392e5 | refs/heads/master | 2022-11-26T11:29:57.946424 | 2020-07-27T10:51:41 | 2020-07-27T10:51:41 | 282,705,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | #!/Users/rafaelrodriguez/Documents/GitHub/djangoReact/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
# Console entry point generated by setuptools: normalize argv[0] (strip the
# "-script.pyw"/".exe" wrapper suffix) and hand off to Django's CLI,
# propagating its exit status.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"rafael.a.rodriguez.m@gmail.com"
] | rafael.a.rodriguez.m@gmail.com | |
f9439e8f937e64169b19a169fc685663650a40c3 | 01bf83cb0b6b3a559ee29cbefd86d400725d76a1 | /lockedfile.py | 71735650a6e9b2b8a013b808839d1b4c656d91a3 | [] | no_license | iJunkie22/finderbackdoors | 50841da42c35859ca77c46a63efe1c0657e50335 | 2cfa4b56e699877d357506047670bf4b1f595bfb | refs/heads/master | 2021-01-11T03:49:23.743586 | 2018-02-09T03:12:35 | 2018-02-09T03:12:35 | 71,385,609 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | import os
import os.path
import stat
def binflagget(int1, rpad, flag_len=1):
    """Extract a flag field of *int1* that sits *rpad* bits from the right.

    The value is shifted down by *rpad* bits and then reduced modulo
    2 ** (rpad + flag_len), matching the original implementation exactly.
    NOTE(review): a pure field extraction would use modulo 2 ** flag_len;
    confirm the rpad term in the modulus is intentional.
    """
    shifted = int1 >> rpad
    modulus = 1 << (rpad + flag_len)
    return shifted % modulus
def flagnpadlen(int1, exp_bit_len=None, *flag_lens):
    """Return (total flag bits, leftover padding bits).

    When *exp_bit_len* is omitted, the bit length of *int1* itself is used
    as the expected width.  The padding is the width minus the sum of all
    declared flag lengths (may be negative if flags exceed the width).
    """
    width = int1.bit_length() if exp_bit_len is None else exp_bit_len
    total_flags = sum(flag_lens)
    return total_flags, width - total_flags
def testflag(sample1, target1, flag_len=1):
    """Extract from *sample1* the flag field whose layout *target1* implies.

    NOTE(review): the *flag_len* parameter is never used -- flagnpadlen is
    always called with a flag length of 1; confirm whether that is intended.
    """
    length, padding = flagnpadlen(target1, None, 1)
    return binflagget(sample1, padding, length)
def hasrpad(int1, exp_bit_len=None, *flag_lens):
    """Return True when *int1*'s low (padding) bits are all zero.

    The padding width is the expected bit length (or *int1*'s own bit
    length when omitted) minus the sum of the flag lengths.  A padding
    width of zero or less always yields False.
    """
    width = int1.bit_length() if exp_bit_len is None else exp_bit_len
    padding = width - sum(flag_lens)
    if padding <= 0:
        return False
    return (int1 >> padding) << padding == int1
def bin_check(sample1, target1):
    # Debug trace of the mask being tested (Python 2 print statement).
    print target1
    # True when the bit(s) selected by target1 are set in sample1:
    # `sample1 % (target1 * 2)` discards everything above target1's bit,
    # then `// target1` isolates that bit.  Assumes target1 is a power of
    # two -- NOTE(review): confirm callers never pass other values.
    return bool(sample1 % (target1 * 2) // target1)
def is_file_locked(fp1):
    # Truthy when the BSD/macOS "user immutable" (uchg) flag is set on fp1.
    # NOTE(review): st_flags only exists on BSD-derived platforms -- this
    # raises AttributeError on Linux/Windows; confirm the target platform.
    return os.stat(fp1).st_flags & stat.UF_IMMUTABLE
if __name__ == '__main__':
    # Ad-hoc demonstration of the bit-flag helpers; prints sample results.
    print('hey')
    print(hasrpad(0b0110, 4, 1, 1, 1))
    print(bin(~0b0110))
    print(binflagget(0b010010, 1))
    print(binflagget(0b010010, 1, 4))
| [
"ijunkie22@gmail.com"
] | ijunkie22@gmail.com |
20f07e2ad4bf3481e705987bfa10220dd8f0913d | 43604e15351734ba2e04ea75cba1657c7a1b6a09 | /Practical7/bankersAlgo.py | fa677f9ebae48a0ef1c797ed411a5071acbb6af5 | [] | no_license | ani1311/operatingSystemLAB | e898682e6cba268fc2138779aebae6609463663e | f09b469d080d2216ae7dee6227d494c601a21654 | refs/heads/master | 2020-04-27T02:46:53.161716 | 2019-04-02T05:16:40 | 2019-04-02T05:16:40 | 174,004,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py |
"""Banker's algorithm demo: find a safe execution sequence for the
hard-coded allocation tables below, printing the state after each step."""

noOfResources = 3
# Parallel tables: process[i] is a process id, aquired[i] the resources it
# currently holds, needed[i] what it still requires to finish.
process = [1, 2, 3, 4]
aquired = [[1, 3, 2], [1, 0, 1], [0, 3, 2], [4, 5, 1]]
needed = [[2, 1, 1], [1, 0, 3], [5, 6, 3], [1, 3, 3]]
available = [4, 2, 4]


def Print():
    """Dump the current allocation table and the free-resource vector."""
    print("%-20s%-18s%-18s"%('Process no','Aquired','Needed'))
    print("%-20s"%(' '),end = "")
    for _ in range(2):
        for i in range(noOfResources):
            print("%-6s"%(str(chr(65 + i))),end = "")
    print()
    for i in range(len(process)):
        print("%-20s"%(process[i]),end = "")
        for j in range(noOfResources):
            print("%-6s"%aquired[i][j],end = "")
        for j in range(noOfResources):
            print("%-6s"%needed[i][j],end = "")
        print()
    print("\nAvailable resources ",end = "")
    for i in range(noOfResources):
        print("%s = %d "%(str(chr(65 + i)),available[i]),end = "")
    print("\n\n")


def delete(i):
    """Release process i's held resources back to the pool, drop its rows."""
    for p in range(len(available)):
        available[p] = available[p] + aquired[i][p]
    process.pop(i)
    aquired.pop(i)
    needed.pop(i)


def getProcess():
    """Return the index of the first runnable process, or -1 if none.

    Generalized to honour noOfResources instead of the original three
    hard-coded comparisons (identical behavior for the 3-resource data).
    NOTE(review): the textbook Banker's algorithm admits a process when
    need <= available; the original strict '<' is preserved here.
    """
    for i in range(len(process)):
        if all(needed[i][r] < available[r] for r in range(noOfResources)):
            return i
    return -1


def bankers():
    """Run the Banker's algorithm; return the safe sequence of process ids
    (as "P<n>" strings) or an error string when the system is unsafe."""
    solution = []
    while process:
        p = getProcess()
        if p == -1:
            return "NO POSSIBLE WAY OUT,SYSTEM UNSAFE"
        solution.append("P" + str(process[p]))
        delete(p)
        Print()
    return solution


Print()
print(bankers())
| [
"aniket.alur13111997@gmail.com"
] | aniket.alur13111997@gmail.com |
fc65c10732aa15e4f9e40c13517d56eff686a49e | d2dc0753adc03ba0f94c92498fe13b9d69d624a8 | /echobot/lib/python2.7/site-packages/watson_developer_cloud/alchemy_data_news_v1.py | e6ce7b315953f3a981ef5408583fc61678b8980c | [] | no_license | Markboerrigter/Sintbot | ed0ab0e85c592ffc87ad857c79a9c0ace9742314 | a8ec68297ee7ed72bf30a534636e68096855c23c | refs/heads/master | 2021-04-30T23:59:05.871466 | 2016-12-07T10:34:19 | 2016-12-07T10:34:19 | 79,110,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | # Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The AlchemyData News service
(https://www.ibm.com/watson/developercloud/alchemy-data-news.html)
"""
from .watson_developer_cloud_service import WatsonDeveloperCloudService
class AlchemyDataNewsV1(WatsonDeveloperCloudService):
    """Client for the AlchemyData News query endpoint."""

    default_url = 'https://gateway-a.watsonplatform.net/calls'

    def __init__(self, url=default_url, **kwargs):
        """Bind the client to *url*, forwarding credentials upstream."""
        WatsonDeveloperCloudService.__init__(self, 'alchemy_api', url, **kwargs)

    def get_news_documents(self, start, end, max_results=10, query_fields=None, return_fields=None, time_slice=None,
                           next_page=None, dedup=None, dedup_threshold=None, rank=None):
        """Fetch news documents published between *start* and *end*.

        *return_fields* may be a list (joined with commas) or a string.
        Keys of *query_fields* are sent as "q.<key>" query parameters
        unless already prefixed with "q.".
        """
        if isinstance(return_fields, list):
            return_fields = ','.join(return_fields)
        params = {
            'start': start,
            'end': end,
            'maxResults': max_results,
            'return': return_fields,
            'timeSlice': time_slice,
            'next': next_page,
            'dedup': dedup,
            'dedupThreshold': dedup_threshold,
            'rank': rank,
        }
        if isinstance(query_fields, dict):
            for field_name in query_fields:
                prefixed = field_name if field_name.startswith('q.') else 'q.' + field_name
                params[prefixed] = query_fields[field_name]
        return self._alchemy_html_request(method_url='/data/GetNews', method='GET', params=params)
| [
"mark@greenorange.com"
] | mark@greenorange.com |
aafb03a34640ee73b19ceca0c8b793e83b89c284 | 2405a31eda2604d4c23976ae72154c4c9603112b | /DungeonFinder/messaging/tests/test_campaign_messaging.py | 10d9f4e5e9a3c5cf520dff483e4ac83d12dcf251 | [] | no_license | tomhamiltonstubber/dundjeon-finder | 1e9194ffb8b606960bc7b71adb6bf2cb99b02faa | 182064e9ba09da11318de6d0a403119704db2450 | refs/heads/master | 2023-02-22T18:46:45.215750 | 2021-01-03T22:21:41 | 2021-01-03T22:21:41 | 319,379,761 | 0 | 0 | null | 2021-01-03T22:21:42 | 2020-12-07T16:33:08 | Python | UTF-8 | Python | false | false | 8,862 | py | from django.test import TestCase
from django.urls import reverse
from DungeonFinder.common.test_helpers import AuthenticatedClient
from DungeonFinder.games.factories.games import CampaignFactory
from DungeonFinder.messaging.factories.messages import MessageFactory
from DungeonFinder.users.factories.users import GameMasterFactory, UserFactory
class CampMessageTestCase(TestCase):
    """Endpoint tests for campaign messages (create / edit / delete / feed),
    exercised from four viewpoints: a player in the campaign, the campaign's
    GM, an admin, and an unrelated player ("oplayer") who must get 404s."""

    def setUp(self):
        # One campaign with a GM creator and one joined player; a client is
        # authenticated for each actor, plus one for an outside player.
        self.player = UserFactory()
        self.gm = GameMasterFactory()
        self.campaign = CampaignFactory(creator=self.gm)
        self.campaign.players.add(self.player)
        self.player_client = AuthenticatedClient(user=self.player)
        self.gm_client = AuthenticatedClient(user=self.gm.user)
        self.admin = UserFactory(is_admin=True)
        self.admin_client = AuthenticatedClient(user=self.admin)
        self.oplayer_client = AuthenticatedClient()
        self.feed_url = reverse('camp-message-feed', args=[self.campaign.pk])
        self.add_msg = reverse('camp-message-add', args=[self.campaign.pk])

    # --- create ---

    def test_create_message(self):
        r = self.player_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        r = self.player_client.post(self.add_msg, {'text': 'I am a message'})
        assert r.json() == {'count': 1}
        r = self.player_client.get(self.feed_url)
        self.assertContains(r, 'I am a message')

    def test_create_message_gm(self):
        r = self.gm_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        r = self.gm_client.post(self.add_msg, {'text': 'I am a message'})
        assert r.json() == {'count': 1}
        r = self.gm_client.get(self.feed_url)
        self.assertContains(r, 'I am a message')

    def test_create_message_admin(self):
        r = self.admin_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        r = self.admin_client.post(self.add_msg, {'text': 'I am a message'})
        assert r.json() == {'count': 1}
        r = self.admin_client.get(self.feed_url)
        self.assertContains(r, 'I am a message')

    def test_create_message_wrong_player(self):
        # Outsiders see an empty feed and cannot post (404).
        MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.oplayer_client.get(self.feed_url)
        assert r.json() == []
        r = self.oplayer_client.post(self.add_msg, {'text': 'I am another message'})
        assert r.status_code == 404

    def test_create_message_bad_form(self):
        r = self.player_client.post(self.add_msg, {'Foo': 'Bar'})
        assert r.json() == {'text': ['This field is required.']}
        assert r.status_code == 400

    # --- edit ---

    def test_edit_message(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        # GET is not allowed on the edit endpoint.
        r = self.player_client.get(reverse('message-edit', args=[m.pk]))
        assert r.status_code == 405
        r = self.player_client.post(reverse('message-edit', args=[m.pk]), {'text': 'I am no longer a message'})
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.player_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        self.assertContains(r, 'I am no longer a message')

    def test_edit_message_gm(self):
        m = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am a message')
        r = self.gm_client.post(reverse('message-edit', args=[m.pk]), {'text': 'I am no longer a message'})
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.gm_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        self.assertContains(r, 'I am no longer a message')

    def test_edit_message_bad_form(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.player_client.post(reverse('message-edit', args=[m.pk]), {'Foo': 'Bar'})
        assert r.json() == {'text': ['This field is required.']}
        assert r.status_code == 400

    def test_edit_message_wrong_author(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.oplayer_client.post(reverse('message-edit', args=[m.pk]), {'Foo': 'Bar'})
        assert r.status_code == 404

    def test_edit_message_admin(self):
        # Admins may edit messages they did not author.
        m = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am a message')
        r = self.admin_client.post(reverse('message-edit', args=[m.pk]), {'text': 'I am no longer a message'})
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.admin_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')
        self.assertContains(r, 'I am no longer a message')

    # --- delete ---

    def test_delete_message(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.player_client.post(reverse('message-delete', args=[m.pk]))
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.player_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')

    def test_delete_message_gm(self):
        m = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am a message')
        r = self.gm_client.post(reverse('message-delete', args=[m.pk]))
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.gm_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')

    def test_delete_message_admin(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.admin_client.post(reverse('message-delete', args=[m.pk]))
        self.assertRedirects(r, self.campaign.get_absolute_url())
        r = self.admin_client.get(self.feed_url)
        self.assertNotContains(r, 'I am a message')

    def test_delete_message_wrong_author(self):
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        r = self.oplayer_client.post(reverse('message-delete', args=[m.pk]))
        assert r.status_code == 404

    # --- feed ---

    def test_message_feed(self):
        # Feed returns newest first with author/gm flags serialized.
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        m2 = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am another message')
        r = self.player_client.get(self.feed_url)
        assert r.json() == [
            {
                'id': m2.id,
                'text': 'I am another message',
                'timestamp': m2.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.gm.user.screen_name,
                'gm_message': True,
            },
            {
                'id': m.id,
                'text': 'I am a message',
                'timestamp': m.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.player.screen_name,
                'gm_message': False,
            },
        ]

    def test_message_feed_no_new_messages(self):
        # "c" is the client's current message count; when it matches there
        # is nothing new and the feed is empty.
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        m2 = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am another message')
        r = self.player_client.get(self.feed_url + '?c=1')
        assert r.json() == [
            {
                'id': m2.id,
                'text': 'I am another message',
                'timestamp': m2.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.gm.user.screen_name,
                'gm_message': True,
            },
            {
                'id': m.id,
                'text': 'I am a message',
                'timestamp': m.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.player.screen_name,
                'gm_message': False,
            },
        ]
        r = self.player_client.get(self.feed_url + '?c=2')
        assert r.json() == []

    def test_message_feed_bad_count(self):
        # A non-numeric "c" falls back to an empty result rather than 500.
        m = MessageFactory(campaign=self.campaign, author=self.player, text='I am a message')
        m2 = MessageFactory(campaign=self.campaign, author=self.gm.user, text='I am another message')
        r = self.player_client.get(self.feed_url)
        assert r.json() == [
            {
                'id': m2.id,
                'text': 'I am another message',
                'timestamp': m2.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.gm.user.screen_name,
                'gm_message': True,
            },
            {
                'id': m.id,
                'text': 'I am a message',
                'timestamp': m.timestamp.strftime('%Y-%m-%dT%H:%M'),
                'author': self.player.screen_name,
                'gm_message': False,
            },
        ]
        r = self.player_client.get(self.feed_url + '?c=foo')
        assert r.json() == []
| [
"tomhamiltonstubber@gmail.com"
] | tomhamiltonstubber@gmail.com |
30eaeb961ef4b87117f60c1d4d1fbb6327932614 | 6f9f30b6d82159de7d5c66ea130609584118e5bd | /mysite/library/migrations/0003_remove_visitor_tellphone.py | 20da9047314afe8ecf98f4e1fa9a0958d9ef3d8a | [] | no_license | arashi1214/Visitor-Registration | a4c63d76725a0da6fe6aaa8cd2ce5e8003f4dc0a | f4896020f3401079b3e74253f3a2ce1f8708dc1d | refs/heads/master | 2023-07-13T04:56:00.896991 | 2021-08-31T11:21:38 | 2021-08-31T11:21:38 | 382,020,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # Generated by Django 3.2.5 on 2021-07-08 14:29
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``tellphone`` field from Visitor."""

    dependencies = [
        ('library', '0002_rename_connect_addres_visitor_connect_address'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='visitor',
            name='tellphone',
        ),
    ]
| [
"opp40519@gmail.com"
] | opp40519@gmail.com |
25bbdadef661bf791ba905765449eea02c40668f | 529a50b8ef8612cc32e2b1f0def7e329cfdd9290 | /scrapy/apps.py | 49ea53c3e4b341328129b9040d3a550219925ee5 | [] | no_license | pss2138/django_ebay_scrapy | cecf1ea8c867a5c4332f54429452f4dcfb09a1c2 | 7f2d787156d0bc60a90af59f4a213af7b549a9ce | refs/heads/master | 2022-12-08T16:45:53.953996 | 2020-09-09T02:29:01 | 2020-09-09T02:29:01 | 292,481,914 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class ScrapyConfig(AppConfig):
    """Django application configuration for the ``scrapy`` app."""
    name = 'scrapy'
| [
"pss2138@gmail.com"
] | pss2138@gmail.com |
0b765030b9c2753fc899b737637e0db413e6a969 | 066672f317976847bb2a5d9f1d8a9c951836bd24 | /2020/01 - Report Repair/part1.py | 19f855bf1f92c1185a63708e35d4e58509586980 | [] | no_license | dcthomson/Advent-of-Code | c546ba3ad2b71cc274fe3196993ef24f23c3f198 | 7a6a6481e5e40a9d490b4f7c3b908d41f9b878cb | refs/heads/master | 2023-09-04T10:54:57.412613 | 2023-08-28T18:28:10 | 2023-08-28T18:28:10 | 164,589,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import sys
def find_pair(nums, target=2020):
    """Return the product of two distinct entries of *nums* that sum to
    *target*, or None when no such pair exists.

    Uses a seen-set for O(n) instead of the original O(n^2) double loop,
    and fixes the bug where an entry could be paired with itself
    (the original's inner loop revisited the same element, so e.g. a
    lone 1010 would wrongly match against itself).
    """
    seen = set()
    for value in nums:
        complement = target - value
        if complement in seen:
            return complement * value
        seen.add(value)
    return None


def main():
    # Expense report: one integer per line, file path from the command line.
    # The `with` block fixes the original's leaked file handle.
    with open(sys.argv[1], "r") as f:
        nums = [int(line) for line in f]
    result = find_pair(nums)
    if result is not None:
        print(result)


if __name__ == "__main__":
    main()
| [
"dcthomso@ndc.nasa.gov"
] | dcthomso@ndc.nasa.gov |
fadbf48ab4b9b3ac8bca2bcdcb82d07cbb002d77 | ddceca792d2ac3beadbaf5ac5f65044ffd7f2981 | /scripts/EmlServer.py | 7c276498486199d17651d76111a254e9d4501aca | [] | no_license | brieweb/EmailVerify | 9a7a33218a07055c6383fb127e702eef4a4ab487 | ba8e1f45f261e52b243b5e4a6a52a6c99d9ed05f | refs/heads/master | 2020-12-24T17:16:59.198275 | 2013-07-11T02:56:59 | 2013-07-11T02:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | from datetime import datetime
import asyncore
from smtpd import SMTPServer
class EmlServer(SMTPServer):
no = 0
def process_message(self, peer, mailfrom, rcpttos, data):
filename = '%s-%d.eml' % (datetime.now().strftime('%Y%m%d%H%M%S'), self.no)
f = open(filename, 'w')
f.write(data)
f.close
print data
print '%s saved.' % filename
self.no += 1
def run():
    """Bind the debug SMTP server on localhost:1025 and serve until Ctrl-C."""
    foo = EmlServer(('localhost', 1025), None)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        # Allow a clean shutdown on Ctrl-C instead of a traceback.
        pass


if __name__ == '__main__':
    run()
| [
"brian@brie.com"
] | brian@brie.com |
b44081fa968b39c839109db375a834db69361a27 | a2e5f53978e9d5b4286b26788b16d4cb7d7e1322 | /custom_hs/Happycall_bak/HC_190107bu.py | 9e645d851aaa4f69e041b3059d37f82e96da851f | [] | no_license | hakseongkim/hakseong | dce3ace0b88db4228c09511eae06905075680eac | 234817b34cf2961409f4acabd4ca24a67120ac53 | refs/heads/master | 2021-04-15T16:04:49.755200 | 2019-03-25T06:46:52 | 2019-03-25T06:49:14 | 126,677,905 | 0 | 0 | null | 2019-03-25T06:49:15 | 2018-03-25T08:24:54 | null | UTF-8 | Python | false | false | 51,909 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from concurrent import futures
import argparse
import grpc
import os
import random
from google.protobuf import empty_pb2
from google.protobuf import struct_pb2 as struct
import time
import syslog
import pymysql
import datetime
import re
exe_path = os.path.realpath(sys.argv[0])
bin_path = os.path.dirname(exe_path)
lib_path = os.path.realpath(bin_path + '/../lib/python')
sys.path.append(lib_path)
from maum.m2u.facade import userattr_pb2
from maum.m2u.da import provider_pb2
from maum.m2u.da.v3 import talk_pb2_grpc
from maum.m2u.da.v3 import talk_pb2
from maum.m2u.facade import front_pb2
# Custom import
#from qa.util import *
from qa import basicQA
from custom_hs.sds import SDS
from qa.util import Util
# echo_simple_classifier = {
# "echo_test": {
# "regex": [
# "."
# ]
# }
# One day in seconds -- presumably a gRPC server keep-alive sleep interval;
# confirm against the server main loop (not visible here).
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# In-memory list of per-session dicts, apparently used only by the
# commented-out session helpers below.
Session_Value = []
# Fallback utterance when no intent is recognized (Korean: "intent not found").
_NOTHING_FOUND_INTENT_ = "의도를 찾지 못했습니다."
# Default SDS dialog model name.
model = 'Happy_Call_HH'
#def setSessionValue(session_id, key, value):
# try:
# sessionList = [x.get("session_id") for x in Session_Value]
# findIndex = sessionList.index(session_id)
# Session_Value[findIndex][key] = value
# except Exception as err:
# pass
#
#def getSessionValue(session_id, key):
# keyValue = None
# isNew = False
#
# if len(Session_Value) > 0:
# sessionList = [x.get("session_id") for x in Session_Value]
# try:
# findIndex = sessionList.index(session_id)
# keyValue = Session_Value[findIndex][key]
# except Exception as err:
# isNew = True
# else:
# isNew = True
#
# if isNew:
# Session_Value.append({"session_id" : session_id, "model" : ""})
# keyValue = Session_Value[len(Session_Value)-1][key]
#
# return keyValue
#
#def RegEx(self, session_id):
# print("RegEx call!!")
#
# print("get model : " + str(getSessionValue(session_id, "model")))
# modelname = str(getSessionValue(session_id, "model"))
# if modelname == "":
# intent = "Happy_Call_HH"
# setSessionValue(session_id, "model", intent)
# elif modelname == "2":
# intent = "privacy"
# setSessionValue(session_id, "model", intent)
#
# #intent = str(getSessionValue(session_id, "model"))
# return intent
class EchoDa(talk_pb2_grpc.DialogAgentProviderServicer):
# STATE
# state = provider_pb2.DIAG_STATE_IDLE
init_param = provider_pb2.InitParameter()
# PROVIDER
provider = provider_pb2.DialogAgentProviderParam()
provider.name = 'control'
provider.description = 'control intention return DA'
provider.version = '0.1'
provider.single_turn = True
provider.agent_kind = provider_pb2.AGENT_SDS
provider.require_user_privacy = True
# PARAMETER
    def __init__(self):
        """Initialize servicer state (IDLE) and the QA/SDS helper engines."""
        syslog.syslog('init')
        self.state = provider_pb2.DIAG_STATE_IDLE
        syslog.syslog(str(self.state))
        self.qa_util = Util()
        self.Sds = SDS()
#
# INIT or TERM METHODS
#
    def IsReady(self, empty, context):
        """gRPC: report the provider's current lifecycle state."""
        print 'V3 ', 'IsReady', 'called'
        status = provider_pb2.DialogAgentStatus()
        status.state = self.state
        return status
    def Init(self, init_param, context):
        """gRPC: store the hub's init parameters and move to RUNNING.

        Returns a copy of this provider's static description.
        """
        print 'V3 ', 'Init', 'called'
        self.state = provider_pb2.DIAG_STATE_INITIALIZING
        # COPY ALL
        self.init_param.CopyFrom(init_param)
        # DIRECT METHOD (no async initialization work is needed here)
        self.state = provider_pb2.DIAG_STATE_RUNNING
        # returns provider
        result = provider_pb2.DialogAgentProviderParam()
        result.CopyFrom(self.provider)
        print 'result called'
        return result
    def Terminate(self, empty, context):
        """gRPC: mark the provider terminated; no resources to release."""
        print 'V3 ', 'Terminate', 'called'
        # DO NOTHING
        self.state = provider_pb2.DIAG_STATE_TERMINATED
        return empty_pb2.Empty()
    def GetUserAttributes(self, empty, context):
        """gRPC: declare the extra user-privacy attributes this DA reads.

        Builds four string attributes (lang, location, device, time).
        Titles and descriptions are user-facing Korean strings and are
        intentionally left untranslated.
        """
        print 'V3 ', 'GetUserAttributes', 'called'
        result = userattr_pb2.UserAttributeList()
        attrs = []
        # Each UserAttribute's name must match a name declared in
        # DialogAgentProviderParam.user_privacy_attributes.  These define
        # attributes needed in addition to the user's base DB fields.
        lang = userattr_pb2.UserAttribute()
        lang.name = 'lang'
        lang.title = '기본 언어 설정'
        lang.type = userattr_pb2.DATA_TYPE_STRING
        lang.desc = '기본으로 사용할 언어를 지정해주세요.'
        attrs.append(lang)
        loc = userattr_pb2.UserAttribute()
        loc.name = 'location'
        loc.title = '기본 지역'
        loc.type = userattr_pb2.DATA_TYPE_STRING
        loc.desc = '기본으로 조회할 지역을 지정해주세요.'
        attrs.append(loc)
        device = userattr_pb2.UserAttribute()
        device.name = 'device'
        device.title = '기본 디바이스'
        device.type = userattr_pb2.DATA_TYPE_STRING
        device.desc = '기본으로 사용할 디바이스를 지정해주세요.'
        attrs.append(device)
        # NOTE(review): the variable is 'country' but the attribute name is
        # 'time', while title/desc describe a base country -- looks like a
        # copy/paste mismatch; confirm the intended attribute name.
        country = userattr_pb2.UserAttribute()
        country.name = 'time'
        country.title = '기준 국가 설정'
        country.type = userattr_pb2.DATA_TYPE_STRING
        country.desc = '기본으로 조회할 국가를 지정해주세요.'
        attrs.append(country)
        result.attrs.extend(attrs)
        return result
#
# PROPERTY METHODS
#
    def GetProviderParameter(self, empty, context):
        """gRPC: return a copy of the static provider description."""
        print 'V3 ', 'GetProviderParameter', 'called'
        result = provider_pb2.DialogAgentProviderParam()
        result.CopyFrom(self.provider)
        return result
    def GetRuntimeParameters(self, empty, context):
        """gRPC: advertise the deployment-time parameters this DA accepts
        (database host / port / user / password / database name).

        NOTE(review): real-looking credentials appear as default values --
        they should be moved out of source control into configuration.
        """
        print 'V3 ', 'GetRuntimeParameters', 'called'
        result = provider_pb2.RuntimeParameterList()
        params = []
        db_host = provider_pb2.RuntimeParameter()
        db_host.name = 'db_host'
        db_host.type = userattr_pb2.DATA_TYPE_STRING
        db_host.desc = 'Database Host'
        db_host.default_value = '171.64.122.134'
        db_host.required = True
        params.append(db_host)
        db_port = provider_pb2.RuntimeParameter()
        db_port.name = 'db_port'
        db_port.type = userattr_pb2.DATA_TYPE_INT
        db_port.desc = 'Database Port'
        db_port.default_value = '7701'
        db_port.required = True
        params.append(db_port)
        db_user = provider_pb2.RuntimeParameter()
        db_user.name = 'db_user'
        db_user.type = userattr_pb2.DATA_TYPE_STRING
        db_user.desc = 'Database User'
        db_user.default_value = 'minds'
        db_user.required = True
        params.append(db_user)
        db_pwd = provider_pb2.RuntimeParameter()
        db_pwd.name = 'db_pwd'
        db_pwd.type = userattr_pb2.DATA_TYPE_AUTH
        db_pwd.desc = 'Database Password'
        db_pwd.default_value = 'minds67~'
        db_pwd.required = True
        params.append(db_pwd)
        db_database = provider_pb2.RuntimeParameter()
        db_database.name = 'db_database'
        db_database.type = userattr_pb2.DATA_TYPE_STRING
        db_database.desc = 'Database Database name'
        db_database.default_value = 'ascar'
        db_database.required = True
        params.append(db_database)
        result.params.extend(params)
        return result
    def OpenSession(self, request, context):
        """gRPC: open a dialog session.

        Extracts an optional lectureNumber slot from the request meta and
        returns a TalkResponse carrying the session id plus an (empty)
        session_data context blob.
        """
        print "openSession"
        #param = {}
        #bizRes = {}
        lectureInfo = {}
        resMessage = 'success'
        meta = ''
        lectureNum = ''
        session_id = request.session.id
        #self.showKeyValue(context.invocation_metadata())
        if 'meta' in request.utter.meta:
            #meta = eval(request.utter.meta['meta'].replace('null','\"\"'))
            meta = request.utter.meta['meta']
            if 'intent' in meta:
                slots = meta['intent']['slots']
                if 'lectureNumber' in slots:
                    lectureNum = slots['lectureNumber']['value']
        #requestParam = Common.setMetaToParamMap(lectureNum=lectureNum,userTalk=' ' ,request=request, isopenRequest=True,session_id=session_id)
        localSessionObj = session_id
        print 'OpenSession id: '+str(request.session.id)
        result = talk_pb2.TalkResponse()
        res_meta = struct.Struct()
        #res_meta['response'] = bizRes
        result.response.meta.CopyFrom(res_meta)
        # session info; session data is limited to ~10k
        result.response.session_update.id = session_id
        res_context = struct.Struct()
        res_context['session_data'] = str(lectureInfo)
        result.response.session_update.context.CopyFrom(res_context)
        print 'OpenSession_'
        return result
    def OpenSkill(self, request, context):
        """gRPC: called when a skill session opens; currently just logs
        and returns an empty TalkResponse."""
        print "OpenSkill start"
        print 'Open request: '+str(request)
        session_id = request.session.id
        print 'open_session_data: '+ str(session_id)+', '+str(context)
        result = talk_pb2.TalkResponse()
        print 'OpenSkill end'
        return result
    def CloseSkill(self, request, context):
        """gRPC: close a skill session; returns an empty response."""
        result = talk_pb2.CloseSkillResponse()
        return result
    def EventT(self, empty, context):
        """gRPC: event hook; intentionally a no-op beyond logging."""
        print 'V3 ', 'DA Version 3 EventT', 'called'
        # DO NOTHING
        return empty_pb2.Empty()
def DBConnect(self, query):
conn = pymysql.connect(user="aicc",
password="ggoggoma",
host="aicc-bqa.cjw9kegbaf8s.ap-northeast-2.rds.amazonaws.com",
database="happycall",
charset="utf8",
use_unicode=False)
curs = conn.cursor(pymysql.cursors.DictCursor)
#query = "select * from test;"
print(query)
curs.execute(query)
print("query good!")
rows = curs.fetchall()
print(rows)
curs.execute("commit;")
curs.close()
conn.close()
return rows
def DBConnect_HC(self, query):
conn = pymysql.connect(user="maum",
password="ggoggoma",
host="localhost",
database="HappyCall",
charset="utf8",
use_unicode=False)
curs = conn.cursor(pymysql.cursors.DictCursor)
#query = "select * from test;"
print(query)
curs.execute(query)
print("query good!")
rows = curs.fetchall()
print(rows)
curs.execute("commit;")
curs.close()
conn.close()
return rows
def Talk(self, talk, context):
#print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
#print(talk)
#print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
a = {}
"""
talk.utter.utter : 사용자 발화
talk_res를 return
talk_res.response.speech.utter : 챗봇 발화
"""
talk.utter.utter = talk.utter.utter + ";$callSeq=6$call_Id=333"
print("talk : ", talk.utter.utter)
seq = talk.utter.utter[talk.utter.utter.index(";$callSeq")+10:talk.utter.utter.index(";$callSeq")+11]
call_id = talk.utter.utter[talk.utter.utter.index("$call_Id")+9:talk.utter.utter.index("$call_Id")+12]
print("", str(seq) + "," + str(call_id))
phoneNum = self.DBConnect("select cust_tel_no from campaign_target_list_tb where contract_no = '" + seq + "';")
phoneNum = phoneNum[0]['cust_tel_no']
#phoneNum = '01084520997'
uttertext = talk.utter.utter[talk.utter.utter.find(";$callSeq"):]
#uttertext = talk.utter.utter[talk.utter.utter.find("$callSeq"):]
talk.utter.utter = talk.utter.utter.replace(uttertext,"")
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print("talk : ", talk.utter.utter)
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
# SDS.dp.slots["product_code"] = "A"
#session_id = talk.session.id
dbsession = self.DBConnect("select session_value,cust_tel_no,cust_nm,join_time,talk_time,insured_contractor,insured_person,insured_closeproduct,address_main,address_sub,product_code,prod_name from campaign_target_list_tb where contract_no = '" + seq + "';")
# cust_nm 통화자(가입자)
# join_time 전체 시간 호출
# join_month 가입 월
# join_day 가입 일
# insurance_contractor 가입 계약자
# insurance_insured 가입 피보험자
# insurance_closeproduct 무해지상품 가입여부
# address_main 메인주소
# address_sub 세부주소
phone = dbsession[0]['cust_tel_no']
user_name = dbsession[0]['cust_nm']
join_time = dbsession[0]['join_time']
print ("join_time : " + str(join_time))
join_strp = datetime.datetime.strptime(str(join_time), '%Y-%m-%d %H:%M:%S')
join_month = join_strp.month
join_day = join_strp.day
talk_time = dbsession[0]['talk_time']
talk_strp = datetime.datetime.strptime(str(talk_time), '%Y-%m-%d %H:%M:%S')
talk_month = talk_strp.month
talk_day = talk_strp.day
talk_hour = talk_strp.hour
talk_minute = talk_strp.minute
insured_contractor = dbsession[0]['insured_contractor']
insured_person = dbsession[0]['insured_person']
insured_closeproduct = dbsession[0]['insured_closeproduct']
privacy_add1 = dbsession[0]['address_main']
privacy_add2 = dbsession[0]['address_sub']
product_code = dbsession[0]['product_code']
prod_name = dbsession[0]['prod_name']
if dbsession[0]['session_value'] is None:
dbsessioncode = talk.session.id
self.DBConnect("update campaign_target_list_tb set session_value = '" + str(dbsessioncode) + "' where contract_no = '" + seq + "';")
session_id = dbsessioncode
else:
session_id = int(dbsession[0]['session_value'])
talk_res = talk_pb2.TalkResponse()
##초기 모델 설정
dbsession = self.DBConnect("select model from campaign_target_list_tb where contract_no = '" + seq + "';")
if dbsession[0]['model'] is None:
model = "Happy_Call_HH"
else:
model = dbsession[0]['model']
#question = talk.utter.utter
meta = dict()
#meta['seq_id'] = util.time_check(0)
meta['log_type'] = 'SVC'
meta['svc_name'] = 'DA'
#output = ""
original_answer = ""
engine_path = list()
answer_engine = "None"
status_code = ""
status_message = ""
flag = False
weight = 0
code = 'None'
sds_intent = ""
# SDS
dbtask = self.DBConnect("select task from campaign_target_list_tb where contract_no = '" + seq + "';")
task = dbtask[0]['task']
sds_intent = self.Sds.GetIntent(talk.utter.utter, model)
if task == 'task2' and sds_intent == "affirm":
print("성공11111")
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
self.DBConnect("update campaign_target_list_tb set model='privacy' where contract_no = '" + seq + "';")
model = 'privacy'
#입력받는 주소를 여기다가 처리를 해줘야함
if task == 'PRIVACY2' and sds_intent == "affirm":
print("성공22222")
print("g")
talk.utter.utter = '$next$'
self.DBConnect("update campaign_target_list_tb set model='Happy_Call_HH' where contract_no = '" + seq + "';")
model = 'Happy_Call_HH'
if task == 'PRIVACY3' and sds_intent == "privacy3":
print("성공333333")
self.DBConnect("update campaign_target_list_tb set model='Happy_Call_HH' where contract_no = '" + seq + "';")
model = 'Happy_Call_HH'
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
#self.DBConnect("update test set task='" + sds_res['current_task'] + "' where contract_no = '" + seq + "';")
sds_intent = self.Sds.GetIntent(talk.utter.utter, model)
print("sds_intent의 값 : " + str(sds_intent))
# 결과값 DB에 저장하는 방식
b = []
b.append("time")
b.append("timeAffirm")
b.append("timeEnd")
for i in range(1, 15):
b.append("task" + str(i))
for i in range(1, 4):
b.append("PRIVACY" + str(i))
if task in b:
camp_id_db = self.DBConnect("select camp_id from hc_hh_campaign_info where task = '" + task + "';")
print (camp_id_db)
camp_id = str(camp_id_db[0]['camp_id'])
call_id_db = self.DBConnect("select camp_id from hc_hh_campaign_info where task = '" + task + "';")
print (camp_id_db)
camp_id = str(camp_id_db[0]['camp_id'])
if sds_intent == 'affirm':
self.DBConnect("insert into hc_hh_campaign_score (call_id, contract_no, info_seq, info_task, task_value) values ('"+call_id+"','"+seq+"','"+camp_id+"','"+task +"','Y')");
elif sds_intent == 'negate':
#self.DBConnect("update campaign_target_list_tb set " + task + "='아니오' where contract_no = '" + seq + "';")
self.DBConnect("insert into hc_hh_campaign_score (call_id, contract_no, info_seq, info_task, task_value) values ('"+call_id+"','"+seq+"','"+camp_id+"','"+task +"','N')");
elif sds_intent == 'overlap' or sds_intent == 'noproportion':
self.DBConnect("insert into hc_hh_campaign_score (call_id, contract_no, info_seq, info_task, task_value) values ('"+call_id+"','"+seq+"','"+camp_id+"','"+task +"','중복')") ;
elif sds_intent == 'nooverlap' or sds_intent == 'proportion':
self.DBConnect("insert into hc_hh_campaign_score (call_id, contract_no, info_seq, info_task, task_value) values ('"+call_id+"','"+seq+"','"+camp_id+"','"+task +"','비례')") ;
else:
self.DBConnect("insert into hc_hh_campaign_score (call_id, contract_no, info_seq, info_task, task_value) values ('"+call_id+"','"+seq+"','"+camp_id+"','"+task +"','입력')") ;
# self.DBConnect("update hc_hh_campaign_score set " + task + "='"+talk.utter.utter+"' where contract_no = '" + seq + "';")
#디비 이관 전 작업하던더
#if task is not None or sds_intent != 'next':
#self.DBConnect("insert into hc_hh_1_result (phone, user_name, session_name, model, task, utter, intent, request_time) values('" + str(phone) + "','" + user_name + "','" + str(session_id) + "','" + model + "','" + str(task) + "','" + talk.utter.utter + "','" + sds_intent + "',NOW());")
#if sds_res['current_task'] == "timeEnd" and sds_res['intent'] == "affirm":
#현재 위치를 저장하는 로직
self.DBConnect("update campaign_target_list_tb set task='"+ sds_res['current_task'] + "' where contract_no = '" + seq + "';")
print("SDS Start!")
#dbtask = self.DBConnect("select task from test where contract_no = '" + seq + "';")
#task = dbtask[0]['task']
#print("++++++++++++++++taskname : " + sds_res['current_task'] + "intent : " + dialog_act)
#그렇다면 넘어가는 로직을 여기다가 잡아놔야 할 것 같음
# 값을 append에 포함되지 않을 경우, 해당로직을 건너 뛰는 것으로...
# SDS
dbtask = self.DBConnect("select task from campaign_target_list_tb where contract_no = '" + seq + "';")
task = dbtask[0]['task']
if task == 'task4' and insured_contractor == insured_person:
talk.utter.utter = "$task4$"
self.DBConnect("update campaign_target_list_tb set task='task5' where contract_no = '" + seq + "';")
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
sds_list = []
# tasknumber = task.find('task':)
if product_code == 'b':
sds_list.append(9)
sds_list.append(11)
#sds_list.append(13)
# if product_code == 'a': sds_list.append(9,10,11,12,13,14)
# if product_code == 'a': sds_list.append(9,10,11,12,13,14)
print (sds_list)
task9_info = ""
task11_info = ""
task13_info = ""
if sds_list == []:
pass
elif max(sds_list) == 9:
task9_info = "다음부터 진행되는 질문은 담보문항에 대한 질문입니다."
elif max(sds_list) == 11:
task11_info = "다음부터 진행되는 질문은 담보문항에 대한 질문입니다."
elif max(sds_list) == 13:
task13_info = "다음부터 진행되는 질문은 담보문항에 대한 질문입니다."
for sds_temp in sds_list:
dbtask = self.DBConnect("select task from campaign_target_list_tb where contract_no = '" + seq + "';")
task = dbtask[0]['task']
if task is None or task == "":
tasknumber = ""
else:
tasknumber = task[4:]
print("tasknumber, sds_temp : "+tasknumber + str(sds_temp))
if tasknumber == str(sds_temp):
talk.utter.utter = "$task"+ str(sds_temp) +"$"
print("utter가 들어간 값은 : " + talk.utter.utter)
self.DBConnect("update campaign_target_list_tb set task='task"+str(sds_temp+1)+"' where contract_no = '" + seq + "';")
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
if tasknumber == 13:
pass
else:
talk.utter.utter = "$task"+ str(sds_temp+1) +"$"
print("utter가 들어간 값은 : " + talk.utter.utter)
self.DBConnect("update campaign_target_list_tb set task='task"+str(sds_temp+2)+"' where contract_no = '" + seq + "';")
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
print(sds_res)
#print("===============test=============")
original_answer = sds_res['response']
answer_engine = "SDS"
engine_path.append("SDS")
#original_answer = self.unknown_answer()
#첫 SDS 답변 입력사항
original_answer = sds_res['response']
talk_res.response.speech.utter = original_answer
#시간 테스트
#talk_time = dbsession[0]['talk_time']
#talk_strp = datetime.datetime.strptime(talk_time, '%Y-%m-%d %H:%M:%S')
#talk_month = talk_strp.month
#talk_day = talk_strp.day
#talk_hour = talk_strp.hour
#talk_minute = talk_strp.minute
nextweekdic = {
"월요일": {0: 7, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1},
"화요일": {0: 8, 1: 7, 2: 6, 3: 5, 4: 4, 5: 3, 6: 2},
"수요일": {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3},
"목요일": {0: 10, 1: 9, 2: 8, 3: 7, 4: 6, 5: 5, 6: 4},
"금요일": {0: 11, 1: 10, 2: 9, 3: 8, 4: 7, 5: 6, 6: 5},
"토요일": {0: 12, 1: 11, 2: 10, 3: 9, 4: 8, 5: 7, 6: 6},
"일요일": {0: 13, 1: 12, 2: 11, 3: 10, 4: 9, 5: 8, 6: 7}
}
weekdic = {
"월요일": {0: 0, 1: -1, 2: -2, 3: -3, 4: -4, 5: -5, 6: -6},
"화요일": {0: 1, 1: 0, 2: -1, 3: -2, 4: -3, 5: -4, 6: -5},
"수요일": {0: 2, 1: 1, 2: 0, 3: -1, 4: -2, 5: -3, 6: -4},
"목요일": {0: 3, 1: 2, 2: 1, 3: 0, 4: -1, 5: -2, 6: -3},
"금요일": {0: 4, 1: 3, 2: 2, 3: 1, 4: 0, 5: -1, 6: -2},
"토요일": {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, 6: -1},
"일요일": {0: 6, 1: 5, 2: 4, 3: 3, 4: 2, 5: 1, 6: 0}
}
daylist = {0: "월", 1: "화", 2: "수", 3: "목", 4: "금", 5: "토", 6: "일"}
temp_list = [0, 1, 2, 3, 4, 5, 6]
temp_list_kor = ["월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
# 현재시간 입력
next_month = ""
next_day = ""
next_part = ""
next_hour = ""
next_minute = ""
#next_time = talk_time
next_time = datetime.datetime.now()
#시간 컨펌
a['nextweek'] = ""
a['morae'] = ""
a['tomorrow'] = ""
a['today'] = ""
a['day'] = ""
a['part'] = ""
a['hour'] = ""
a['minute'] = ""
a['input_month'] = ""
a['input_day'] = ""
if sds_res['current_task'] == 'timeAffirm':
items = sds_res['intent.filled_slots.items']
print(items)
for id in items:
a[id[0]] = id[1]
print(id)
if a['nextweek'] == "" : slot_nextweek = ""
elif a['nextweek'] == '돌아오는' : slot_nextweek = "다음"
else: slot_nextweek = a['nextweek']
if a['morae'] == "" : slot_morae = ""
else: slot_morae = a['morae']
if a['input_month'] == "" : slot_input_month = ""
else: slot_input_month = a['input_month']
if a['input_day'] == "" : slot_input_day = ""
else: slot_input_day = a['input_day']
if a['tomorrow'] == "" : slot_tomorrow = ""
else: slot_tomorrow = a['tomorrow']
if a['today'] == "" : slot_today = ""
else: slot_today = a['today']
if a['day'] == "" : slot_day = ""
else: slot_day = a['day']
if a['part'] == "" : slot_part = ""
elif a['part'] == '저녁' or a['part'] == '밤': slot_part = "오후"
elif a['part'] == '아침' or a['part'] == '새벽': slot_part = "오전"
else: slot_part = a['part']
if a['hour'] == "" : slot_hour = ""
else: slot_hour = a['hour']
if a['minute'] == "" : slot_minute = ""
elif a['minute'] == "반": slot_minute = "30"
else: slot_minute = a['minute']
### 시를 수정하는 로직
print ('첫번쨰 시 출력' + str(slot_hour))
re_input = str(slot_hour)
re_input = re_input.replace(' ','')
hanCount = len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', re_input.decode('utf-8')))
print(hanCount)
if hanCount >= 1:
print("여기에 들어왔따")
slot_hour = self.change_bu(re_input)
hanCount = len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', slot_hour.decode('utf-8')))
if hanCount >= 1:
slot_hour = self.change(re_input)
else:
print('시에서 1차 분류 거치고 한글은 더이상 없는 것 같네요')
pass
else:
print('시에서는 한글은 없는 것 같네요')
pass
print ('두번쨰 시 출력' + str(slot_hour))
# slot_hour = int(slot_hour)
### 분를 수정하는 로직
re_input = str(slot_minute)
re_input = re_input.replace(' ','')
hanCount = len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', re_input.decode('utf-8')))
if hanCount >= 1:
slot_minute = self.change_bu(re_input)
hanCount = len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', slot_minute.decode('utf-8')))
if hanCount >= 1:
slot_minute = self.change(re_input)
else:
print('분에서 1차 분류 거치고 한글은 더이상 없는 것 같네요')
pass
else:
print('분에서는 한글은 없는 것 같네요')
pass
# slot_minute = int(slot_minute)
print ("slot_nextweek : " + str(slot_nextweek))
print ("slot_morae : " + str(slot_morae))
print ("slot_tomorrow : " + str(slot_tomorrow))
print ("slot_today : " + str(slot_today))
print ("slot_day : " + str(slot_day))
print ("slot_part : " + str(slot_part))
print ("slot_hour : " + str(slot_hour))
print ("slot_minute : " + str(slot_minute))
print("시이이이이이작")
if slot_input_month is not None and slot_input_month != "":
if slot_input_day is not None and slot_input_day != "":
print ("일월 입력")
next_time = next_time.replace(month=int(slot_input_month), day=int(slot_input_day))
elif slot_input_day is None and slot_input_day == "":
print ("월은 있는데 일은 입력되지 않았습니다.")
talk.utter.utter = "$dayMiss$"
elif slot_input_month is None or slot_input_month == "":
if slot_input_day is not None and slot_input_day != "":
next_time = next_time.replace(day=int(slot_input_day))
#print("일은 있는데 월을 입력하지 않았습니다.")
#talk_utter.utter = "$dayMiss$"
else:
if slot_nextweek == "이번":
if slot_day in temp_list_kor:
print("여기에 들어와씀")
b = int(weekdic[str(slot_day)][next_time.weekday()])
#next_time = next_time.replace(day=next_time.day + b)
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
elif slot_day is None or slot_day == "":
talk.utter.utter = "$dayMiss$"
else:
talk.utter.utter = "$dayMiss$"
elif slot_nextweek == "다음":
print("다음 적용")
# if test_time.weekday() in temp_list:
if slot_day in temp_list_kor:
print("111111111111111111111 : " + str(slot_day) + str(next_time.weekday()))
b = int(nextweekdic[str(slot_day)][next_time.weekday()])
print (b)
#next_time = next_time.replace(day=next_time.day + int(b))
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
elif slot_day is None or slot_day == "":
b = 7
#next_time = next_time.replace(day=next_time.day + int(b))
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
else:
talk.utter.utter = "$dayMiss$"
print("쓸대없는 요일을 말씀하셨습니다.")
#일자 더하기
elif slot_morae == "모레" or slot_morae == "내일 모레" or slot_morae == "내일모레":
b = 2
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
#next_time = next_time.replace(day=next_time.day + b)
elif slot_tomorrow == "내일":
print("들어와씀")
b = 1
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
#next_time = next_time.replace(day=next_time.day + b)
elif slot_today == "오늘":
b = 0
plus_time = datetime.timedelta(days=b)
next_time = next_time + plus_time
#next_time = next_time.replace(day=next_time.day + b)
elif slot_nextweek == "" or slot_nextweek is None:
pass
else:
talk.utter.utter = "$inputMiss$"
print ("일정을 입력을 하시지 않았습니다. 다시 입력해주세요.")
#elif slot_day[daylist] in temp_list:
if slot_hour is None or slot_hour == "":
if slot_minute is None or slot_minute == "":
print(talk_time.hour + talk_time.minute)
next_time = next_time.replace(hour=int(talk_time.hour), minute=int(talk_time.minute))
pass
else:
print("시를 말씀해주시지 않았습니다.")
talk.utter.utter = "$hourMiss$"
else:
if slot_minute is None or slot_minute == "":
print("3333333333333333333333")
next_time = next_time.replace(hour=int(slot_hour), minute = 00)
else:
print("4444444444444444444444")
print ("slot_hour + :"+ str(slot_hour))
print ("slot_minute + :"+ str(slot_minute))
next_time = next_time.replace(hour=int(slot_hour), minute=int(slot_minute))
next_month = next_time.month
next_day = next_time.day
if slot_part == "오전" or slot_part == "오후":
next_part = slot_part
if slot_hour is None or slot_hour == "":
print("나와ㄸ따다ㅣㅏㅓ아러나리어나리어나리어내ㅏ")
talk.utter.utter = "$hourMiss$"
next_hour = next_time.hour
#next_time.minute = ""
else:
next_hour = next_time.hour
else:
if next_time.hour > 12 and next_time.hour < 24:
next_part = "오후"
next_hour = next_time.hour - 12
elif next_time.hour == 12:
next_part = "오후"
next_hour = 12
elif next_time.hour > 0 and next_time.hour < 12:
next_part = "오전"
next_hour = next_time.hour
elif next_time.hour == 0:
next_part = "오전"
next_hour = next_time.hour
next_minute = next_time.minute
#slot_hour = self.readNumberHour((a['hour']))
#text_output = self.readNumber(31)
#talk_res.response.speech.utter = text_output
print(next_time)
#next_month = ""
#next_day = ""
#next_part = ""
#next_hour = ""
#next_minute = ""
if sds_res['current_task'] == 'timeAffirm':
if talk.utter.utter == "$hourMiss$" or talk.utter.utter == "$dayMiss$":
print("dcdddddddddddddddddddddddddddddddddddd")
sds_res = self.Sds.Talk(talk.utter.utter, session_id, model, product_code)
else:
print("===============test=============")
print("next_month : " + str(next_month))
print("next_day : " + str(next_day))
print("next_part : " + str(next_part))
print("next_hour : " + str(next_hour))
print("next_minute : " + str(next_minute))
print("===============test=============")
talk_res.response.speech.utter = "말씀하신 통화가능 시간이 " + self.readNumberMinute(next_month) +"월"+ self.readNumberMinute(next_day) + "일 "+str(next_part) +", " + self.readNumberHour(next_hour) + "시 "+ self.readNumberMinute(next_minute) + "분이 맞습니까?"
self.DBConnect("update campaign_target_list_tb set next_time='"+str(next_time)+"' where contract_no = '" + seq + "';")
#talk_res.response.speech.utter = "말씀하신 통화가능 시간이 " + next_month +"월"+ next_day + "일 "+next_part +", " + next_hour + "시 "+ next_minute + "분이 맞습니까?"
#말씀하신 일정이 11월 19일 오전 3시 30분이 맞습니까?
#if sds_res['current_task'] == 'timeEnd':
#self.DBConnect("update hc_hh_campaign_score set next_time='"+str(next_time)+"' where contract_no = '" + seq + "';")
#질문 수정사항
sds_intent = self.Sds.GetIntent(talk.utter.utter, model)
#task1
if sds_res['current_task'] == 'task1':
talk_res.response.speech.utter = "안녕하십니까?, 현대해상 고객센터입니다, " + user_name + "고객님 되십니까?"
#task2
if sds_res['current_task'] == 'task2':
talk_res.response.speech.utter = "" + self.readNumberMinute(join_month) + "월" + self.readNumberMinute(join_day) +"일, 저희 현대해상 "+prod_name+"을 가입해 주셔서, 진심으로 감사드립니다, 가입하실때, 상품의 중요한 사항이 제대로 설명되었는지, 확인드리고자 연락드렸습니다, 소요시간은 약 삼분정도인데, 잠시 통화 가능하십니까?"
#PRIVACY1
if sds_res['current_task'] == 'PRIVACY1':
talk_res.response.speech.utter = "지금부터 진행하는 내용은 고객님의 권리보호를 위해 녹음되며, 답변하신 내용은 향후 민원 발생시, 중요한 근거자료로 활용되오니, 정확한 답변 부탁드리겠습니다, 먼저 본인확인을 위해 주민번호 여섯자리를 말씀해주세요."
#PRIVACY2
if sds_res['current_task'] == 'PRIVACY2':
talk_res.response.speech.utter = "말씀해주셔서 감사합니다, 고객님의 주소는 " + str(privacy_add1) + ", " + str(privacy_add2) + "로 확인 되는데 맞으십니까?"
#PRIVACY3
if sds_res['current_task'] == 'PRIVACY3':
talk_res.response.speech.utter = "말씀해주셔서 감사합니다, 고객님의 주소는 " + str(privacy_add1) + " 으로 확인되는데요, 나머지 주소는 어떻게 되십니까?"
# ask3
if sds_res['current_task'] == 'task3':
talk_res.response.speech.utter = "확인 감사드립니다, 계약하실 때 계약자 " + insured_contractor + "님께서 청약서, 상품설명서, 개인정보처리 동의서에 직접 서명하셨습니까?"
# task4
if sds_res['current_task'] == 'task4':
talk_res.response.speech.utter = "타인의 사망을 보장 해주는 계약의 경우 보험대상자도 반드시 서면동의를 해주셔야 하는데요, 피보험자 "+ insured_person +" 님도 직접 서명하셨습니까?"
# task8
if sds_res['current_task'] == 'task8':
if insured_closeproduct == 'Y':
talk_res.response.speech.utter = "중도해지 또는 만기시, 환급금이 납입한 보험료보다 적을 수 있다는 설명을 들으셨습니까?"
else:
talk_res.response.speech.utter = "보험료 납입기간 중 중도 해지시, 해지환급금이 지급되지 않는다는 설명을 들으셨나요?"
# task9
if sds_res['current_task'] == 'task9':
talk_res.response.speech.utter = task9_info + "화재벌금 또는 과실 치사상 벌금 담보 등은 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
if sds_intent == "unknown":
talk_res.response.speech.utter = "화재벌금 담보는 중복 가입시 비례보상됩니다, 화재벌금 또는 과실 치사상 벌금 담보 등은 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
elif sds_intent == "affirm" or sds_intent == "negate":
talk_res.response.speech.utter = "화재벌금 또는 과실 치사상 벌금 담보 등은 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
#time
if sds_res['current_task'] == 'time':
talk_res.response.speech.utter = "그럼 가능하신 시간을 알려주시면 다시 연락드리겠습니다,주말도 가능하니, 편하신 요일과 시간을,말씀해주세요"
#time_end
if sds_res['current_task'] == 'timeEnd':
talk_res.response.speech.utter = "네 고객님, 말씀하신 시간에 다시 연락을 드리겠습니다, 현대해상 고객센터였습니다, 감사합니다. $callback$"
# task11
if sds_res['current_task'] == 'task11':
talk_res.response.speech.utter = task11_info + "일상생활배상책임 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
if sds_intent == "unknown":
talk_res.response.speech.utter = "일상생활배상책임 담보는 중복 가입시 비례보상됩니다, 일상생활배상책임 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
elif sds_intent == "affirm" or sds_intent == "negate":
talk_res.response.speech.utter = "일상생활배상책임 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
# task13
if sds_res['current_task'] == 'task13':
talk_res.response.speech.utter = task13_info+ "법률비용 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
if sds_intent == "unknown":
talk_res.response.speech.utter = "법률비용 담보는 중복 가입시 비례보상됩니다, 법률비용 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
elif sds_intent == "affirm" or sds_intent == "negate":
talk_res.response.speech.utter = "법률비용 담보는 중복가입시 보험금을 중복해서 받으실 수 있다고 설명 들으셨다면 중복, 실제지급액을 한도로 비례보상 된다는 설명을 들으셨다면 비례를 말씀해주세요."
# task13
if sds_res['current_task'] == 'time':
if sds_intent == "hourmiss":
talk_res.response.speech.utter = "통화 가능 시를 말씀해주시지 않았습니다.통화가능 시를 말씀해주세요."
if sds_intent == "daymiss":
talk_res.response.speech.utter = "통화 가능 요일을 말씀해주시지 않았습니다.통화가능 요일을 말씀해주세요."
#task14
if sds_res['current_task'] == 'task14':
talk_res.response.speech.utter = "네 고객님, 소중한시간 내주셔서 감사합니다. 현대해상 고객센터였습니다. $complete$"
print("[ANSWER]: " + original_answer)
#print("[SESSION_KEY] :" + )
print("[ENGINE]: " + answer_engine)
#위치 전송
#self.DBConnect("update hc_hh_campaign_score set task='"+ sds_res['current_task'] + "' where contract_no = '" + seq + "';")
return talk_res
# 1~999
def readNumberMinute(self, n):
    """Spell out an integer (months/days/minutes, 0-9999) in Sino-Korean.

    e.g. 19 -> '십구', 30 -> '삼십', 0 -> '영'.
    """
    value = int(n)
    place_names = ['일', '십', '백', '천']
    digit_names = ['일', '이', '삼', '사', '오', '육', '칠', '팔', '구']
    parts = []
    position = 0
    while value:
        value, digit = divmod(value, 10)
        if digit == 1:
            # A leading 1 is implicit in Korean: 10 reads '십', not '일십'.
            parts.append(place_names[position])
        elif digit:
            suffix = '' if position == 0 else place_names[position]
            parts.append(digit_names[digit - 1] + suffix)
        position += 1
    # Digits were collected least-significant first; reverse for reading order.
    return ''.join(reversed(parts)) if parts else '영'
def readNumberHour(self, n):
    """Spell out an hour with native-Korean numerals for TTS output.

    e.g. 2 -> '두', 8 -> '여덟', 11 -> '열한', 0 -> '영'.
    """
    n = int(n)
    units = '한,열,백,천'.split(',')
    # BUG FIX: '어덟' was a misspelling of 8; change_bu() maps the correct
    # spelling '여덟', so the TTS text and the recognizer table now agree.
    nums = '한,두,세,네,다섯,여섯,일곱,여덟,아홉'.split(',')
    result = []
    i = 0
    while n > 0:
        n, r = divmod(n, 10)
        if r == 1:
            # A leading 1 is implicit: 10 reads '열', not '한열'.
            result.append(str(units[i]))
        elif i == 0 and r > 0:
            result.append(nums[r - 1])
        elif r > 0:
            result.append(nums[r - 1] + str(units[i]))
        i += 1
    if len(result) == 0:
        result.append("영")
    # Digits were collected least-significant first; reverse for reading order.
    return ''.join(result[::-1])
def change_bu(self, input):
    """Replace native-Korean numeral words in *input* with Arabic digits.

    e.g. '열두' -> '12', '다섯' -> '5'.  Longer words must be replaced
    before their prefixes/suffixes so '열아홉' becomes '19', not '열' + '아홉'.
    """
    word_to_digits = {
        '한': '1', '두': '2', '세': '3', '네': '4', '다섯': '5',
        '여섯': '6', '일곱': '7', '여덟': '8', '아홉': '9', '열': '10',
        '열한': '11', '열두': '12', '열세': '13', '열네': '14',
        '열다섯': '15', '열여섯': '16', '열일곱': '17', '열여덟': '18',
        '열아홉': '19', '스물': '20', '스물한': '21', '스물두': '22',
        '스물세': '23', '스물네': '24',
    }
    # BUG FIX: the original iterated a *set* of words, whose order is
    # arbitrary (and hash-randomized per process), so a short word such as
    # '열' could be substituted before '열아홉', yielding '109' instead of
    # '19'.  Sorting by length (longest first) makes replacement
    # deterministic and correct; equal-length words never overlap.
    for word in sorted(word_to_digits, key=len, reverse=True):
        if word in input:
            input = input.replace(word, word_to_digits[word])
    return input
def change(self, input):
    """Convert a Sino-Korean numeral string to an integer.

    e.g. '이십삼' -> 23, '십' -> 10, '오' -> 5.  Bare digit characters
    ('영일이삼사오육칠팔구') are buffered and scaled by the multiplier
    character that follows them: 십/백/천 accumulate within a group,
    만/억/조 close out a group.
    """
    digit_chars = '영일이삼사오육칠팔구'
    multipliers = {'십': 10, '백': 100, '천': 1000,
                   '만': 10000, '억': 100000000, '조': 1000000000000}
    grand_total = 0    # value of completed 만/억/조 groups
    group_value = 0    # value accumulated inside the current group
    pending_digit = 0  # most recent bare digit, not yet scaled
    for ch in input:
        digit = digit_chars.find(ch)
        if digit != -1:
            pending_digit = digit
            continue
        if ch in '만억조':
            # Close the current group and scale it; a bare '만' means 1만.
            group_value += pending_digit
            if group_value:
                grand_total += group_value * multipliers[ch]
            else:
                grand_total += multipliers[ch]
            group_value = 0
        else:
            # 십/백/천: scale the pending digit (default 1, so '십' == 10).
            if pending_digit:
                group_value += pending_digit * multipliers[ch]
            else:
                group_value += multipliers[ch]
        pending_digit = 0
    return grand_total + group_value + pending_digit
def unknown_answer(self):
    """Return one of the canned fallback replies, chosen at random."""
    fallback_replies = (
        '죄송해요, 제가 잘 못 알아 들었어요. 키워드 위주로 다시 질문해주시겠어요?',
        '답변을 찾을 수 없습니다. 다른 질문을 해주시면 성실히 답변해 드리겠습니다. ',
    )
    return random.choice(fallback_replies)
def Close(self, req, context):
    """gRPC Close handler: echo the request's session/agent keys back in a TalkStat.

    NOTE(review): Python 2 print statement — the surrounding file targets
    Python 2.  No session teardown happens here beyond the echo.
    """
    print 'V3 ', 'Closing for ', req.session_id, req.agent_key
    talk_stat = provider_pb2.TalkStat()
    talk_stat.session_key = req.session_id
    talk_stat.agent_key = req.agent_key
    return talk_stat
def EventT(self, empty, context):
    """gRPC EventT handler: intentionally a no-op; logs and returns an Empty message."""
    print 'V3 ', 'DA Version 3 EventT', 'called'
    # DO NOTHING
    return empty_pb2.Empty()
def Open(self, req, context):
    """gRPC Open handler: return a canned OpenResponse with demo meta/context.

    The meta/context payloads below are hard-coded placeholder values (note
    the misspelled 'htpp://' URLs) — this looks like stub/demo data rather
    than live configuration.
    NOTE(review): 'struct' here appears to be a protobuf Struct module alias
    (struct.Struct() takes no format argument), not the stdlib struct —
    confirm against the file's imports.
    """
    print 'V3 ', 'Open', 'called'
    # req = talk_pb2.OpenRequest()
    event_res = talk_pb2.OpenResponse()
    event_res.code = 1000000
    event_res.reason = 'success'
    # Placeholder playback metadata: three flat keys plus two nested
    # "audio" structs (name/url/duration each).
    answer_meta = struct.Struct()
    answer_meta["play1"] = "play1"
    answer_meta["play2"] = "play2"
    answer_meta["play3"] = "play3"
    answer_meta.get_or_create_struct("audio1")["name"] = "media_play1"
    answer_meta.get_or_create_struct("audio1")["url"] = "htpp://101.123.212.321:232/media/player_1.mp3"
    answer_meta.get_or_create_struct("audio1")["duration"] = "00:10:12"
    answer_meta.get_or_create_struct("audio2")["name"] = "media_play2"
    answer_meta.get_or_create_struct("audio2")["url"] = "htpp://101.123.212.321:232/media/player_1.mp3"
    answer_meta.get_or_create_struct("audio2")["duration"] = "00:00:15"
    event_res.meta.CopyFrom(answer_meta)
    # Placeholder context entries.
    answer_context = struct.Struct()
    answer_context["context1"] = "context_body1"
    answer_context["context2"] = "context_body2"
    answer_context["context3"] = "context_body3"
    event_res.context.CopyFrom(answer_context)
    # DO NOTHING
    return event_res
def Event(self, req, context):
    """gRPC Event handler: return a canned EventResponse with placeholder meta/context.

    NOTE(review): like Open(), this fills protobuf Structs with fixed demo
    values; the request payload is not inspected.
    """
    print 'V3 ', 'Event', 'called'
    # req = talk_pb2.EventRequest()
    event_res = talk_pb2.EventResponse()
    event_res.code = 10
    event_res.reason = 'success'
    answer_meta = struct.Struct()
    answer_meta["meta1"] = "meta_body_1"
    answer_meta["meta2"] = "meta_body_2"
    event_res.meta.CopyFrom(answer_meta)
    answer_context = struct.Struct()
    answer_context["context1"] = "context_body1"
    answer_context["context2"] = "context_body2"
    answer_context["context3"] = "context_body3"
    event_res.context.CopyFrom(answer_context)
    return event_res
def serve():
    """Parse the --port argument, start the gRPC DA server, and block until Ctrl-C."""
    arg_parser = argparse.ArgumentParser(description='CMS DA')
    arg_parser.add_argument('-p', '--port',
                            nargs='?',
                            dest='port',
                            required=True,
                            type=int,
                            help='port to access server')
    options = arg_parser.parse_args()
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    talk_pb2_grpc.add_DialogAgentProviderServicer_to_server(
        EchoDa(), grpc_server)
    grpc_server.add_insecure_port('[::]' + ':' + str(options.port))
    grpc_server.start()
    # grpc's start() returns immediately; keep the main thread alive until
    # the operator interrupts, then shut down without a grace period.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)


if __name__ == '__main__':
    serve()
| [
"36613711+hakseongkim@users.noreply.github.com"
] | 36613711+hakseongkim@users.noreply.github.com |
6a412f8211c39b15d51608475b79640b8e605c99 | 079f2de1a3f4851ff33dee6b5e5640a408d5f312 | /formulaire_alert.py | 24d2a45fca2641ad2d63167e57bd43bec70b4e5b | [] | no_license | FatenFrioui/selenium-python-unittest-smoke-report | 02dd079c06035205bd22cc2a3d4b70205ce1138a | 64b90eb5d837808cc5424ef715cd0f39666db440 | refs/heads/master | 2023-09-01T04:35:00.945116 | 2021-10-12T18:28:46 | 2021-10-12T18:28:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import time
# Smoke script: open the TutorialsPoint practice form, trigger the alert
# button, and accept the JavaScript alert that pops up.
from selenium import webdriver
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome(executable_path="C:\webdriver\chromedriver.exe")
driver.get("https://www.tutorialspoint.com/selenium/selenium_automation_practice.htm")
driver.maximize_window()
time.sleep(3)
driver.refresh()

# Submitting the button raises a JS alert dialog.
driver.find_element_by_xpath("//button[contains(text(),'Button')]").submit()

# Touch the alert via switch_to first (raises if no alert is present)...
alert_popup = driver.switch_to.alert
time.sleep(3)
# ...then accept it through the Alert wrapper.
alert_popup = Alert(driver)
alert_popup.accept()
# alert_popup.dismiss()
# driver.fullscreen_window("alert") | [
"faten.frioui.1@gmail.com"
] | faten.frioui.1@gmail.com |
f4676f06c2197e853bce2ca0135df3bc10bbcdaa | 77a5335a780571b2f99cb55f71bb753af8de9647 | /test1.py | 176ff50f7ecfd7e4a256976c5b5630db5a2170d6 | [
"MIT"
] | permissive | wangzhaowzt/WZT | 502efa7abc2bb85dbfa9da5f28e05f9ec85c295c | e4282c6d2ace03f35bce9942e8405cae91f62214 | refs/heads/master | 2020-03-20T23:24:22.747013 | 2018-06-19T06:37:34 | 2018-06-19T06:37:34 | 137,844,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | name = "wangzhao"
| [
"614361245@qq.com"
] | 614361245@qq.com |
3eb0342dae47455c88ce635ba7837528bec00211 | bd5ff41adf799d3d267f0bd4ab4d6d3a77322578 | /spirometry/migrations/0001_initial.py | 619cf44faaac186e78bd89f94273d68e23a877d0 | [] | no_license | SamuelSilvaAraujo/clinica-admin | d4448dda742aff270ca8b6bc27e859f80151b7ae | 6b01695cd014db21d5b4c981e89ed4db0932b68e | refs/heads/master | 2022-12-30T12:38:29.644625 | 2020-10-10T01:08:02 | 2020-10-10T01:08:02 | 302,784,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | # Generated by Django 2.2.7 on 2020-02-16 19:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the spirometry app.

    Creates three tables: Material (consumable names), Stock (dated material
    quantities), and Spirometry (dated exams linked to a patient).
    NOTE(review): auto-generated by `makemigrations` (Django 2.2.7); an
    applied migration should not be hand-edited.
    """

    # This is the app's first migration.
    initial = True

    # Spirometry references patient.Patient, so the patient app's initial
    # migration must be applied first.
    dependencies = [
        ('patient', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Material',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name strings are Portuguese ('Nome' = name).
                ('name', models.CharField(max_length=50, verbose_name='Nome')),
            ],
        ),
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(auto_now_add=True, verbose_name='Data')),
                ('amount', models.IntegerField(verbose_name='Quantidade')),
                # Deleting a Material cascades to its Stock rows.
                ('material', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='spirometry.Material', verbose_name='Material')),
            ],
        ),
        migrations.CreateModel(
            name='Spirometry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(auto_now_add=True, verbose_name='Data')),
                # Deleting a Patient cascades to their Spirometry exams.
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='patient.Patient', verbose_name='Paciente')),
            ],
        ),
    ]
| [
"samuelsas.samuel18@gmail.com"
] | samuelsas.samuel18@gmail.com |
e7d6146e8f06ad64e502726293b6f8146f118f2e | 68163f8009b9c34bd8600381ac8bf54e4675f560 | /texttutils/views.py | 5d0916a8d301221399c3532e0b1453f65494ca96 | [] | no_license | Premrajdeore27/TextUtil | 3202de0384cb1a5ff7ab2b2675b959a0f34d9362 | a5ffd903005c75d19d154ab78e3e7fc3689cf607 | refs/heads/master | 2023-07-07T22:36:41.238527 | 2021-08-16T19:54:16 | 2021-08-16T19:54:16 | 396,978,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Serve the landing page template."""
    return render(request, 'index.html')
def analyze(request):
    """Apply the text transformations selected by the form checkboxes.

    Reads `text` and the on/off state of each checkbox from POST, applies the
    enabled transformations in sequence (each result feeding the next), and
    renders analyze.html with the final `purpose`/`analyzed_text` pair.
    Returns an "Error" response when no transformation is selected.
    """
    # get the text
    djtext = request.POST.get('text', 'default')

    # Checkbox states: 'on' when ticked, 'off' otherwise.
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    shortcaps = request.POST.get('shortcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    Extraspaceremover = request.POST.get('Extraspaceremover', 'off')
    countcaps = request.POST.get('countcaps', 'off')

    # Reject early when nothing was selected (the original checked this at
    # the end, after doing all the work — same observable behavior).
    if (removepunc != "on" and fullcaps != "on" and shortcaps != "on"
            and newlineremover != "on" and Extraspaceremover != "on"
            and countcaps != "on"):
        return HttpResponse("Error")

    params = {}

    if removepunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
        # BUG FIX: the accumulator started as " " (a single space), which
        # prepended a stray space to every result.
        analyzed = "".join(char for char in djtext if char not in punctuations)
        params = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}
        djtext = analyzed

    if fullcaps == "on":
        analyzed = djtext.upper()
        params = {'purpose': 'Changed to uppercase', 'analyzed_text': analyzed}
        # BUG FIX: every branch below used `djtext == analyzed` — a no-op
        # comparison instead of an assignment — so transformations never
        # chained into each other.
        djtext = analyzed

    if shortcaps == "on":
        analyzed = djtext.lower()
        params = {'purpose': 'changed to lowercase', 'analyzed_text': analyzed}
        djtext = analyzed

    if newlineremover == "on":
        analyzed = "".join(char for char in djtext if char not in "\n\r")
        params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
        djtext = analyzed

    if Extraspaceremover == "on":
        # Collapse runs of spaces, keeping the last space of each run.
        # BUG FIX: the original read djtext[index + 1] without a bounds
        # check, raising IndexError at the final character of any
        # non-empty text.
        analyzed = ""
        for index, char in enumerate(djtext):
            if char == " " and index + 1 < len(djtext) and djtext[index + 1] == " ":
                continue
            analyzed += char
        params = {'purpose': 'Removed Extra Space', 'analyzed_text': analyzed}
        djtext = analyzed

    if countcaps == "on":
        # Character count of the (possibly transformed) text.
        analyzed = len(djtext)
        params = {'purpose': 'Count The Number Of Character', 'analyzed_text': analyzed}

    return render(request, 'analyze.html', params)
| [
"86760579+Premrajdeore27@users.noreply.github.com"
] | 86760579+Premrajdeore27@users.noreply.github.com |
3fee5b59e3eb1b94bcb75fc43b1ae3e7d7aea85a | c916622b1d23dbead5f8785530ccb2ab6164ab97 | /bert/run_classifier.py | 76bf845fd7d85a6668814de4771317a696322466 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | guojson/ACE-ADP | 3bebb03a96c4f4cf17d46d4088c700ee09c19a78 | 0f4c0fe29c9da859b92f5a2bbfdec0d724a8061d | refs/heads/main | 2023-08-11T19:41:57.357119 | 2021-09-15T13:49:14 | 2021-09-15T13:49:14 | 406,746,119 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,782 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import tensorflow as tf
from bert import tokenization, modeling, optimization
# Command-line interface. All run configuration is expressed as absl/tf flags;
# required flags are enforced in the `__main__` guard at the bottom of the file.
flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

# TPU-only settings; ignored when `use_tpu` is False.
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
  """One raw (untokenized) sample for sequence classification.

  Holds an identifier, one or two text segments and an optional label; the
  fields are stored verbatim and consumed later by `convert_single_example`.
  """

  def __init__(self, guid, text_a, text_b=None, label=None):
    """Record the example fields.

    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence. For single
        sequence tasks, only this sequence must be specified.
      text_b: (Optional) string. The untokenized text of the second sequence.
        Only must be specified for sequence pair tasks.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid, self.label = guid, label
    self.text_a, self.text_b = text_a, text_b
class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.

  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.

  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
  """
  # Intentionally empty: instances are recognized by type in
  # `convert_single_example`, which emits all-zero features for them.
class InputFeatures(object):
  """Model-ready features for a single example.

  Carries the padded token ids, attention mask, segment ids and label id,
  plus a flag distinguishing real examples from TPU padding examples.
  """

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    # Bind every constructor argument to the attribute of the same name.
    for name, value in (("input_ids", input_ids),
                        ("input_mask", input_mask),
                        ("segment_ids", segment_ids),
                        ("label_id", label_id),
                        ("is_real_example", is_real_example)):
      setattr(self, name, value)
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets.

  Subclasses implement the four getters for a specific corpus; `_read_tsv`
  is shared plumbing for reading tab-separated files.
  """

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file.

    Uses tf.gfile so the path may live on GCS as well as local disk.
    Returns a list of rows, each a list of column strings.
    """
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set (Chinese subset)."""

  def __init__(self):
    # Only examples in this language are used from the multilingual corpus.
    self.language = "zh"

  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        # Skip the header row.
        continue
      guid = "train-%d" % (i)
      text_a = tokenization.convert_to_unicode(line[0])
      text_b = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[2])
      # The training TSV uses "contradictory" where dev/test use
      # "contradiction"; normalize so labels match `get_labels()`.
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        # Skip the header row.
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      # The dev file mixes all languages; keep only the configured one.
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        # Skip the header row.
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        # The test split is unlabeled; use a fixed dummy label.
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        # Skip the header row.
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        # The test split is unlabeled; use a fixed dummy label.
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version); single-sentence task."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        # Test rows put the sentence in column 1 and have no label.
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`.

  Args:
    ex_index: int index of the example; the first 5 examples are logged.
    example: an `InputExample` or a `PaddingInputExample`.
    label_list: list of all label strings for the task.
    max_seq_length: fixed output length; inputs are truncated/zero-padded.
    tokenizer: WordPiece tokenizer providing `tokenize` and
      `convert_tokens_to_ids`.

  Returns:
    An `InputFeatures` with ids, mask and segment ids of length
    `max_seq_length`.
  """

  # Padding examples get all-zero features and is_real_example=False so
  # they can be weighted out of metrics later.
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)

  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)

  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  # Log the first few converted examples for manual sanity checking.
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file.

  Each example is serialized as a tf.train.Example whose int64 features
  match the keys expected by `file_based_input_fn_builder`.
  """

  writer = tf.python_io.TFRecordWriter(output_file)

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    def create_int_feature(values):
      # Wrap a python int sequence as an int64 tf.train.Feature.
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  Reads the TFRecord file written by
  `file_based_convert_examples_to_features` and yields batched feature
  dicts. Training mode repeats and shuffles the data.
  """

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    # TPUEstimator injects the per-core batch size via `params`.
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model.

  Builds a BERT encoder, takes the pooled [CLS] output, and adds a single
  dense softmax classification layer on top.

  Returns:
    (loss, per_example_loss, logits, probabilities) tensors.
  """
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # Standard softmax cross-entropy, written out explicitly.
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  The closure builds the classifier graph, optionally restores weights from
  `init_checkpoint`, and produces a TRAIN, EVAL or PREDICT
  `TPUEstimatorSpec` depending on `mode`.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      # Older TFRecords may lack the flag; treat everything as real.
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU the restore must happen inside a Scaffold.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:

      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        # Padding examples carry weight 0 and do not affect the metrics.
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator.

  In-memory variant of `file_based_input_fn_builder`: the whole feature set
  is embedded in the graph as constants, so it only suits small data sets.
  """

  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d

  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`.

  In-memory counterpart of `file_based_convert_examples_to_features`;
  progress is logged every 10,000 examples.
  """

  features = []
  total = len(examples)
  for ex_index, example in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, total))
    features.append(
        convert_single_example(ex_index, example, label_list, max_seq_length,
                               tokenizer))
  return features
def main(_):
  """Entry point: wires flags, processor, estimator and the three phases.

  Runs any combination of training, evaluation and prediction as selected
  by the `do_train` / `do_eval` / `do_predict` flags.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  # Map of lower-cased task name -> DataProcessor subclass.
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  # BERT's position embeddings cap the usable sequence length.
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        # Stop before the TPU padding examples at the tail.
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  # These flags have no usable defaults; fail fast if any is missing.
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
| [
"gxc@cau.edu.cn"
] | gxc@cau.edu.cn |
3bc0f99a416ba6f34e286b8d92caf92f577dbede | 156438262d49447d925383128b449439488e757a | /ch07/_generate_data.py | b6e48c0cbe9652b2e213ab66158c82df7ce3e3d6 | [] | no_license | sajacaros/python_cleancode | 1f9db55160688bf0e16e909075f721ecc905ad61 | c51270f5570b1ac42cff1fc84a0e7ced2726b44d | refs/heads/master | 2021-02-07T09:37:21.936779 | 2020-03-02T14:32:44 | 2020-03-02T14:32:44 | 244,010,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import os
import tempfile
PURCHASES_FILE = os.path.join(tempfile.gettempdir(), "purchases.csv")
def create_purchases_file(filename, entries=1_000_000):
    """Write *entries* synthetic purchase rows ("2018-01-01,<i>") to *filename*.

    The function is idempotent: if the file already exists it is left
    untouched, so repeated runs reuse the previously generated data.
    """
    if os.path.exists(filename):
        return
    with open(filename, "w+") as fh:
        fh.writelines(f"2018-01-01,{row}\n" for row in range(entries))
if __name__ == "__main__":
    # Generate the sample data set when executed as a script.
    create_purchases_file(PURCHASES_FILE)
"dukim@bnpinnovation.com"
] | dukim@bnpinnovation.com |
3f403776cbe66282ea6ee1106d3c6f6480becaab | 1456711961dcd298c228699ee3ce0861a2f419c1 | /models/UserProfile.py | a81ecf0b4e91328878bcea8631de26fb44397874 | [] | no_license | bxji/cruzhacks-challenge | b573f5935f0f55f6bb3b5b8cc42d7776345d4d9b | 9c0fda615991116df95cc0e7e75cda4ea2af82f0 | refs/heads/master | 2020-04-27T05:57:43.775727 | 2019-03-09T00:02:13 | 2019-03-09T00:02:13 | 174,095,284 | 0 | 1 | null | 2019-10-02T04:45:44 | 2019-03-06T07:35:07 | Python | UTF-8 | Python | false | false | 1,178 | py | class UserProfile:
# we can calculate age from taking datetime.now() - birthdate
def __init__(self, UserProfileId, UserTypeCode, Name, School, Major, Street1, Street2, City, StateCode, ZipCode, CountryCode, Phone, Email, BirthDate, ProfileImageUrl):
self.UserProfileId = UserProfileId
self.UserTypeCode = UserTypeCode
self.Name = Name
self.School = School
self.Major = Major
self.Street1 = Street1
self.Street2 = Street2
self.City = City
self.StateCode = StateCode
self.ZipCode = ZipCode
self.CountryCode = CountryCode
self.Phone = Phone
self.Email = Email
self.BirthDate = BirthDate
self.ProfileImageUrl = ProfileImageUrl
def asdict(self):
return {'UserProfileId':self.UserProfileId, 'UserTypeCode':self.UserTypeCode, 'Name':self.Name, 'School':self.School, 'Major':self.Major, 'Street1':self.Street1, 'Street2':self.Street2, 'City':self.City, 'StateCode':self.StateCode, 'ZipCode':self.ZipCode, 'CountryCode':self.CountryCode, 'Phone':self.Phone, 'Email':self.Email, 'BirthDate':self.BirthDate, 'ProfileImageUrl':self.ProfileImageUrl} | [
"xj9797@gmail.com"
] | xj9797@gmail.com |
52080a362e4c3ceb2822f229da8005edd6ef036e | 4a5f11b55e23999a82b62f5c72b44e9a36d24f63 | /simplemooc/forum/admin.py | 7c813d107c771cc9ce0f430c826d0736f3a53f31 | [] | no_license | diogo-alves/simplemooc | dca62bfcb2ea6357a551a5760778537f083b675c | cfec59f99888e4e23d41f020ff06bfdf39f70203 | refs/heads/master | 2022-05-10T10:32:18.686313 | 2019-06-04T19:30:43 | 2019-06-04T19:30:43 | 190,260,470 | 0 | 0 | null | 2022-04-22T21:34:44 | 2019-06-04T18:46:43 | Python | UTF-8 | Python | false | false | 585 | py | from django.contrib import admin
from .models import Thread, Reply
class ThreadAdmin(admin.ModelAdmin):
    """Admin configuration for forum threads."""

    list_display = ['title', 'body', 'author', 'updated_at']
    search_fields = ['title', 'body', 'author__username']
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}


class ReplyAdmin(admin.ModelAdmin):
    """Admin configuration for thread replies."""

    list_display = ['thread', 'reply', 'author', 'correct', 'updated_at']
    search_fields = ['thread', 'reply', 'author__username']
    list_filter = ['thread__title', 'author__username']


admin.site.register(Thread, ThreadAdmin)
admin.site.register(Reply, ReplyAdmin)
| [
"diogo.alves.ti@gmail.com"
] | diogo.alves.ti@gmail.com |
46248fac96121f6b8eb5f785ea4ea44210e4f9c7 | a90a734a04032ab48970857c4d43ff00be639a5a | /poll_project/polling_app/urls.py | 3aca1fdafc69c50c62b96679ea12a08c3d25b5a2 | [] | no_license | saumyadudeja/Survey_tool_updates | 80d7de856f2e5be96210869e0f39b3594124b24f | ce5bfa61846776bc83da593241617eba2d22c85f | refs/heads/master | 2023-03-04T23:47:37.615084 | 2021-02-16T16:12:10 | 2021-02-16T16:12:10 | 339,456,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | """The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, reverse_lazy
from django.views.generic.base import RedirectView
#from polling_app.views import Screening
#from polling_app.views.index_view import IndexView
# All routes are currently disabled; the commented entries document the
# intended URL scheme for the polling app.
urlpatterns = [
    # path('survey/<survey_id>/', ..., name='survey-detail'),
    # path('survey/<survey_id>/', ..., name='survey-detail-step'),
    # path('', IndexView.as_view(), name='index-view'),
    # path('confirm/<survey_id>/', ..., name='survey-confirmation'),
    # path('completed/', ..., name='survey-complete'),
    # path('addquestions/', poll_views.addquestions, name='add question'),
    # path('_nested_admin/', include('nested_admin.urls')),
]
""
] | |
e09cb301ca117fd905794987d996e78187545a4d | 43f836fceeae60fcb22de2825293c51e79ea297c | /food_tracker/food/models.py | b98c25c6fbd21290b356e41e4da8f41b8dca32cf | [] | no_license | cebasilio/food_tracker | ccc7d0a026796c576e1b89a81b7e691d1bdf798e | 8478654dab0d27ed3b883b402b7d429e8aeba1df | refs/heads/master | 2020-07-22T03:13:33.738482 | 2019-09-08T07:05:19 | 2019-09-08T07:05:19 | 207,057,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | from django.db import models
# Create your models here.
class Food(models.Model):
    """Nutritional facts for one food item (values are per serving)."""

    name = models.CharField(verbose_name="Food Name", max_length=200)
    calories = models.FloatField(verbose_name="Calories (kcal)")
    total_fat = models.FloatField(verbose_name="Total Fat (g)")
    saturated_fat = models.FloatField(verbose_name="Saturated Fat (g)")
    cholesterol = models.FloatField(verbose_name="Cholesterol (mg)")
    sodium = models.FloatField(verbose_name="Sodium (mg)")
    total_carbohydrate = models.FloatField(verbose_name="Total Carbohydrate (g)")
    dietary_fibre = models.FloatField(verbose_name="Dietary Fibre (g)")
    sugar = models.FloatField(verbose_name="Sugar (g)")
    protein = models.FloatField(verbose_name="Protein (g)")

    def __str__(self):
        return "%s" % self.name
class Meal(models.Model):
    """A food eaten at a given time of day, scaled by serving size."""

    BREAKFAST = 1
    MORNING_SNACK = 2
    LUNCH = 3
    AFTERNOON_SNACK = 4
    DINNER = 5
    EVENING_SNACK = 6
    # NOTE(review): MORNING_SNACK is defined but missing from the choices
    # below; this matches the original definition -- confirm it is intended.
    MEAL_TIME_TYPES = (
        (BREAKFAST, "Breakfast"),
        (LUNCH, "Lunch"),
        (AFTERNOON_SNACK, "Afternoon Snack"),
        (DINNER, "Dinner"),
        (EVENING_SNACK, "Evening Snack")
    )

    food = models.ForeignKey(Food, verbose_name="Food", on_delete=models.CASCADE)
    serving_size = models.IntegerField(verbose_name="Serving Size")
    meal_time = models.IntegerField(verbose_name="Meal Time", choices=MEAL_TIME_TYPES)

    def __str__(self):
        return "%s" % self.food

    def _scaled(self, per_serving_value):
        # Multiply a per-serving nutritional value by the serving count.
        return self.serving_size * per_serving_value

    def get_total_calories(self):
        return self._scaled(self.food.calories)

    def get_total_fat(self):
        return self._scaled(self.food.total_fat)

    def get_total_saturatedFat(self):
        return self._scaled(self.food.saturated_fat)

    def get_total_cholesterol(self):
        return self._scaled(self.food.cholesterol)

    def get_total_sodium(self):
        return self._scaled(self.food.sodium)

    def get_total_carbohydrate(self):
        return self._scaled(self.food.total_carbohydrate)

    def get_total_dietaryFibre(self):
        return self._scaled(self.food.dietary_fibre)

    def get_total_sugar(self):
        return self._scaled(self.food.sugar)

    def get_total_protein(self):
        return self._scaled(self.food.protein)
| [
"noreply@github.com"
] | noreply@github.com |
4746c981797505bf69fd57f0d97bf558710322b5 | 611e5481b709f22d01c7afad4fddd1bb291efcf0 | /LogAndReg/apps/LogAndReg_app/migrations/0001_initial.py | 81ab53f976b6299170ea42bc06d572125577d170 | [] | no_license | AaronDasani/Django | 2ffc5ffc69266d59570c3faa19cfde8e13cdd307 | 7acf0721744accb333f15013d9988f5fe3b1ad5c | refs/heads/master | 2021-07-23T23:30:23.457589 | 2019-01-14T19:23:57 | 2019-01-14T19:23:57 | 150,312,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-10-02 01:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the registration ``User`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('confirm_password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"maad@Aarons-iMac.local"
] | maad@Aarons-iMac.local |
9a8e5ff5ac645a3cc48a2db51ef611314f4736f6 | 20a358db6e9e9872453a7fb36ef21268054b241d | /pyml/ditech/database/insert_traffic.py | 95f8193ac0e10728700c619c82578331c5c5dc3e | [] | no_license | fengkaicnic/pyml | ee654cdef2ba107e1c1e8d598691af3accb96b3c | a19865cdb9eb69517258416a2b08b86f9d43a023 | refs/heads/master | 2021-01-21T04:40:44.659607 | 2016-07-29T08:33:07 | 2016-07-29T08:33:07 | 44,159,061 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | import utils
import traceback
import os
import time
import pdb
start = time.time()
try:
    path = 'D:/ditech/citydata/season_2/test_set_2/traffic_data'
    conn = utils.persist.connection()
    cur = conn.cursor()
    for pl in os.listdir(path):
        # Data files have no extension; skip anything with a dot in the name.
        if '.' in pl:
            continue
        with open(path + '/' + pl) as file:
            for line in file:
                # Tab-separated layout: district_hash, tj_level..., tj_time.
                # (list() needed: py3 map() cannot be sliced like the py2 one.)
                lst = [part.strip() for part in line.split('\t')]
                for tline in lst[1:-1]:
                    # Parameterised query instead of %-formatted SQL: avoids
                    # injection/quoting bugs.  Assumes a DB-API driver with
                    # paramstyle 'format' (MySQL-style) -- TODO confirm.
                    cur.execute(
                        'insert into traffic_test2(district_hash, tj_level, tj_time) '
                        'values(%s, %s, %s)',
                        (lst[0], tline, lst[-1]))
    conn.commit()
    conn.close()
except Exception:
    traceback.print_exc()
    # Best-effort flush of whatever was inserted before the failure.
    conn.commit()
    conn.close()
end = time.time()
print(end - start)
| [
"fkdhy@163.com"
] | fkdhy@163.com |
002d0f4ce0b61dd442cd36dc1c9e72f71ac89dd4 | d6b835d1e72c347af67df701db803f18278f927e | /visit.py | 210822feca23acb97016b8fa6ba0d415aa28ab3a | [] | no_license | Dodon4/vtkvisit | a7333bb01f6784f756caaa5ab55b316c068aa1a2 | f612d558e18fa99564cf8c5d01e7b9ba2f6703b7 | refs/heads/master | 2020-09-24T20:45:15.465776 | 2019-12-23T14:51:44 | 2019-12-23T14:51:44 | 225,839,315 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,606 | py | import re
import math
import numpy as np
class reader:
    """Parsers for the plain-text result/cartogram files used by ``plot``."""

    def __init__(self):
        pass

    def read_csv(self):
        # Placeholder: CSV input is not implemented yet.
        pass

    def read_sel(self, filename):
        """Parse *filename* into rows of numeric tokens.

        Every line is scanned for numbers.  A 5-token row is treated as
        mantissa/exponent form: tokens 4 and 5 are folded into a single
        float ``token4 * 10**token5``.  Empty rows are dropped.
        """
        self.filename = filename
        # Bug fix: the original never closed the file handle.
        with open(self.filename) as file:
            self.values = file.read().split("\n")
        self.data = []
        for key in self.values:
            self.value = re.findall(r"[-+]?\d*\.\d+|\d+", key)
            if len(self.value) == 5:
                # Combine mantissa and exponent into one float value.
                self.value[3] = (10 ** float(self.value[4])) * float(self.value[3])
                del self.value[4]
            if self.value != []:
                self.data.append(self.value)
        return self.data

    def _read_cart(self, filename):
        """Parse a cartogram file: one list of numeric tokens per line."""
        self.filename = filename
        with open(self.filename) as file:
            self.values = file.read().split("\n")
        self.data = []
        for key in self.values:
            self.value = re.findall(r"[-+]?\d*\.\d+|\d+", key)
            if self.value != []:
                self.data.append(self.value)
        return self.data
class plot:
    """Writes legacy-format VTK files for result data on a hexagonal map.

    ``plt2d`` emits '2d.vtk' (one hexagonal polygon per cartogram cell,
    coloured by the value column); ``plt1d`` emits '1d.vtk' (a polyline of
    the value column).  *data* holds result rows (index columns followed
    by a value column); *data_cart* holds cartogram rows of cell numbers.
    """

    def __init__(self, data, data_cart):
        self.data = data
        self.data_cart = data_cart

    def _hex_cornerX(self, centerX, i):
        """X coordinate of hexagon corner *i* (0-5) around *centerX*."""
        self.centerX = centerX
        self.i = i
        self.angle_deg = 60 * i + 30
        self.angle_rad = math.pi / 180 * self.angle_deg
        return self.centerX + self.size * math.cos(self.angle_rad)

    def _hex_cornerY(self, centerY, i):
        """Y coordinate of hexagon corner *i* (0-5) around *centerY*."""
        self.centerY = centerY
        self.i = i
        self.angle_deg = 60 * i + 30
        self.angle_rad = math.pi / 180 * self.angle_deg
        return self.centerY + self.size * math.sin(self.angle_rad)

    def _get_max_num(self, i):
        """Largest integer value found in column *i* of the data."""
        self.max = 0
        self.i = i
        for index in range(len(self.data)):
            # (original used `while`, which behaved as an `if` here)
            if self.max < int(self.data[index][i]):
                self.max = int(self.data[index][i])
        return self.max

    def _check_mask(self, maskPos):
        """True if cell number *maskPos* occurs in the cartogram."""
        for i in range(round(len(self.data_cart) / 6)):
            for item in self.data_cart[i]:
                if int(item) == maskPos:
                    return True
        return False

    def _num_of_cells(self):
        """Largest cell number present in the cartogram."""
        self.max_Mask = 0
        for i in range(round(len(self.data_cart) / 6)):
            for item in self.data_cart[i]:
                if self.max_Mask < int(item):
                    self.max_Mask = int(item)
        return self.max_Mask

    def _column(self, matrix, i):
        """Column *i* of *matrix* converted to floats."""
        return [float(row[i]) for row in matrix]

    def _rings(self, matrix):
        """Number of hexagonal rings needed to hold *matrix* cells."""
        self.matrix = matrix
        return (1 + (1 + 4 * ((self.matrix) - 1) / 3) ** (1 / 2)) / 2

    def _data2d_2d(self, x):
        """Rows of a 2-D file whose slower-varying index equals *x*."""
        self.x = x
        self.data_show = []
        # The column with the larger maximum is the cell index; filter on
        # the other one.
        if self._get_max_num(0) > self._get_max_num(1):
            self.ind = 1
        else:
            self.ind = 0
        for i in range(len(self.data)):
            if x == int(self.data[i][self.ind]):
                self.data_show.append(self.data[i])
        return self.data_show

    def _data3d_2d(self, x, y):
        """Rows of a 3-D file matching slice indices *x* and *y*."""
        self.x = x
        self.y = y
        self.data_show = []
        # Pick the two non-cell index columns (the cell column is the one
        # with the largest maximum value).
        if self._get_max_num(0) > self._get_max_num(1) and self._get_max_num(0) > self._get_max_num(2):
            self.ind1 = 1
            self.ind2 = 2
        elif self._get_max_num(1) > self._get_max_num(0) and self._get_max_num(1) > self._get_max_num(2):
            self.ind1 = 0
            self.ind2 = 2
        else:
            self.ind1 = 1
            self.ind2 = 2
        self.dataX = []
        for i in range(len(self.data)):
            if x == int(self.data[i][self.ind1]):
                self.dataX.append(self.data[i])
        for i in range(len(self.dataX)):
            if y == int(self.dataX[i][self.ind2]):
                self.data_show.append(self.dataX[i])
        return self.data_show

    def _write_hex(self, f, sign=1.0):
        """Write the six corner vertices of the hexagon centred at
        (self.centerX, self.centerY); sign=-1.0 mirrors it through the
        origin.  Output format per corner: "<x> <y> 0.0\\n"."""
        for corner in range(6):
            f.write(str(sign * self._hex_cornerX(self.centerX, corner)) + " "
                    + str(sign * self._hex_cornerY(self.centerY, corner)) + " 0.0\n")

    def plt2d(self, size, *ind):
        """Write '2d.vtk' for one 2-D slice of the data.

        *ind*: two indices slice a 3-D file, one index slices a 2-D file;
        any other arity returns -1.  *size* is the hexagon radius.
        """
        self.data_show = []
        if len(ind) == 2:
            self.data_show = self._data3d_2d(ind[0], ind[1])
        elif len(ind) == 1:
            self.data_show = self._data2d_2d(ind[0])
        else:
            return -1
        self.size = size
        self.points = self._column(self.data_show, -1)
        self.num = self._num_of_cells() + 1
        self.r = math.ceil(self._rings(self.num))
        self.stepX = self.size / 2 * math.cos(math.pi / 3)
        self.stepY = self.size / 2 * math.sin(math.pi / 3)
        self.centerX = 0
        self.centerY = 0
        self.shiftY = 0
        self.shiftX = 0
        self.ring = 0
        self.width = self.size * 2
        self.vert = self.width * 3 / 4
        self.height = self.width ** (3 / 2)
        self.MaskPos = 0  # running cell number while walking the rings
        with open('2d.vtk', 'w', encoding='utf-8') as f:
            f.write("# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " + str(
                6 * self.num) + " float\n")
            for j in range(int(self.r)):          # for each ring
                if (j == 0):
                    self._write_hex(f)            # centre cell, unconditionally
                    self.MaskPos += 1
                for index in range(j * 3):        # half of the cells in ring j
                    if self._check_mask(self.MaskPos):
                        self._write_hex(f)
                    if self._check_mask(self.MaskPos + 3 * j):
                        # The cell point-symmetric to MaskPos about the origin.
                        self._write_hex(f, sign=-1.0)
                    self.MaskPos += 1
                    # Advance the hexagon centre along the ring.
                    self.centerX -= self.width / 2
                    self.centerY += self.vert + self.size / 4
                    if (index >= j):
                        self.centerX -= self.width / 2
                        self.centerY -= self.vert + self.size / 4
                    if (index >= 2 * j):
                        self.centerX += self.width / 2
                        self.centerY -= self.vert + self.size / 4
                self.MaskPos += 3 * j
                self.shiftX += self.width
                self.centerX = self.shiftX
                self.centerY = self.shiftY
            f.write("POLYGONS " + str(self.num) + " " + str(self.num * 7) + "\n")
            for index in range(self.num):
                # Connect each group of six consecutive points into a hexagon.
                f.write("6 " + str(0 + index * 6) + " " + str(1 + index * 6) + " " + str(2 + index * 6) + " " + str(
                    3 + index * 6) + " " + str(4 + index * 6) + " " + str(5 + index * 6) + "\n")
            f.write("\nCELL_DATA " + str(self.num) + "\n\nSCALARS p float\n" + "\nLOOKUP_TABLE default\n")
            # Emit the scalar value for every cell, in the same ring order.
            self.k = 0
            for j in range(int(self.r)):
                if (j == 0):
                    f.write(str(self.points[self.k]) + "\n")
                    self.k += 1
                for index in range(j * 3):
                    if self._check_mask(self.k):
                        f.write(str(self.points[self.k]) + "\n")
                    if self._check_mask(self.k + 3 * j):
                        f.write(str(self.points[self.k + 3 * j]) + "\n")
                    self.k += 1
                self.k += 3 * j

    def _data1d(self, x):
        """Rows of a 1-D file whose index matches *x* (int or iterable)."""
        self.data_show = []
        if type(x) == int:
            self.dataX = []
            for i in range(len(self.data)):
                if x == int(self.data[i][0]):
                    self.data_show.append(self.data[i])
        else:
            for j in range(len(x)):
                for i in range(len(self.data)):
                    if x[j] == int(self.data[i][0]):
                        self.data_show.append(self.data[i])
        return self.data_show

    def _data2d(self, x, y):
        """Rows of a 2-D file matching *x* and *y* (each int or iterable;
        at most one of them may be an iterable)."""
        self.data_show = []
        if type(x) == int:
            self.dataX = []
            for i in range(len(self.data)):
                if x == int(self.data[i][0]):
                    self.dataX.append(self.data[i])
            if type(y) == int:
                for i in range(len(self.dataX)):
                    if y == int(self.dataX[i][1]):
                        self.data_show.append(self.dataX[i])
            else:
                for j in range(len(y)):
                    for i in range(len(self.dataX)):
                        if y[j] == int(self.dataX[i][1]):
                            self.data_show.append(self.dataX[i])
        else:
            self.dataX = []
            for j in range(len(x)):
                for i in range(len(self.data)):
                    if x[j] == int(self.data[i][0]):
                        self.dataX.append(self.data[i])
            for i in range(len(self.dataX)):
                if y == int(self.dataX[i][1]):
                    self.data_show.append(self.dataX[i])
        return self.data_show

    def _data3d(self, x, y, z):
        """Rows of a 3-D file matching *x*, *y*, *z* (each int or
        iterable; at most one of them may be an iterable)."""
        self.data_show = []
        if type(x) == int:
            self.dataX = []
            for i in range(len(self.data)):
                if x == int(self.data[i][0]):
                    self.dataX.append(self.data[i])
            if type(y) == int:
                self.dataXY = []
                for i in range(len(self.dataX)):
                    if y == int(self.dataX[i][1]):
                        self.dataXY.append(self.dataX[i])
                if type(z) == int:
                    for i in range(len(self.dataXY)):
                        if z == int(self.dataXY[i][2]):
                            self.data_show.append(self.dataXY[i])
                else:
                    for j in range(len(z)):
                        for i in range(len(self.dataXY)):
                            if z[j] == int(self.dataXY[i][2]):
                                self.data_show.append(self.dataXY[i])
            else:
                self.dataXY = []
                for j in range(len(y)):
                    for i in range(len(self.dataX)):
                        if y[j] == int(self.dataX[i][1]):
                            self.dataXY.append(self.dataX[i])
                for i in range(len(self.dataXY)):
                    if z == int(self.dataXY[i][2]):
                        self.data_show.append(self.dataXY[i])
        else:
            self.dataX = []
            for j in range(len(x)):
                for i in range(len(self.data)):
                    if x[j] == int(self.data[i][0]):
                        self.dataX.append(self.data[i])
            self.dataXY = []
            for i in range(len(self.dataX)):
                if y == int(self.dataX[i][1]):
                    self.dataXY.append(self.dataX[i])
            for i in range(len(self.dataXY)):
                if z == int(self.dataXY[i][2]):
                    self.data_show.append(self.dataXY[i])
        return self.data_show

    def plt1d(self, *ind):
        """Write '1d.vtk': a polyline over the value column of the rows
        selected by *ind* (0-3 fixed indices; more than 3 returns -1)."""
        if len(ind) == 3:
            self.data_show = self._data3d(ind[0], ind[1], ind[2])
        elif len(ind) == 2:
            if (len(self.data[0]) == 3):
                self.data_show = self._data2d(ind[0], ind[1])
            else:
                # _data3d also stores the result in self.data_show; the
                # explicit assignment below is equivalent to the original.
                self.data_show = self._data3d(range(0, self._get_max_num(0) + 1), ind[0], ind[1])
        elif len(ind) == 1:
            if (len(self.data[0]) == 2):
                self.data_show = self._data1d(ind[0])
            else:
                self.data_show = self._data2d(range(0, self._get_max_num(0) + 1), ind[0])
        elif len(ind) == 0:
            self.data_show = self.data
        else:
            return -1
        print(self.data_show)
        self.points = self._column(self.data_show, -1)
        with open('1d.vtk', 'w', encoding='utf-8') as f:
            f.write("# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " + str(
                len(self.points)) + " float\n")
            for index in range(len(self.points)):
                # NOTE(review): index 0 gets suffix ".0", others "0.0" --
                # reproduced from the original; confirm the intended x step.
                if index == 0:
                    f.write(str(index) + ".0 " + str(self.points[index]) + " 0.0" + "\n")
                else:
                    f.write(str(index) + "0.0 " + str(self.points[index]) + " 0.0" + "\n")
            f.write("LINES " + str(len(self.data_show) - 1) + " " + str(3 * (len(self.data_show) - 1)) + "\n")
            for index in range(len(self.data_show) - 1):
                f.write("2" + " " + str(index) + " " + str(index + 1) + "\n")
"noreply@github.com"
] | noreply@github.com |
f12909ae3687b94a8897089cdd9e2b9641468040 | 1850d9b3449350f5e985bead1bb2956cf3ec9fcf | /get_title.py | 085add443075ed10d66e2741df9a6adcdbf014eb | [] | no_license | adityashetty0302/selenium-web-scraping-geckodriver | 07b02e614382d7285bb34c8b5bfc2cea1ca262ea | 1ee0249c0d0619946890c6f7e57d0e7b9df4e073 | refs/heads/master | 2020-03-25T00:25:24.371284 | 2018-08-01T17:15:12 | 2018-08-01T17:15:12 | 143,184,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from selenium import webdriver
if __name__ == '__main__':
    # Launch Firefox via geckodriver, fetch a page and print its title.
    driver = webdriver.Firefox()
    try:
        driver.get('http://google.com')
        print(driver.title)
    finally:
        # Always release the browser -- the original leaked the Firefox
        # process if get()/title raised.
        driver.quit()
| [
"adityashetty0302@gmail.com"
] | adityashetty0302@gmail.com |
8e0c690433c7e302e6f8520617120f771d883118 | dca50b689638d1f26f15e9b97c73b6c687f4d61c | /fibonacci test 2.py | 27969f180da62b986b3dafb15ee7f4c8081b6ad9 | [] | no_license | mtakle/learningpython | 3e1d8c1e0a420dfb2746bc67c75054f39500414d | 5cd9616ffcc7e54f215c243a40de580e94f184ac | refs/heads/master | 2020-03-27T09:16:31.644532 | 2018-09-04T11:47:19 | 2018-09-04T11:47:19 | 146,326,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #test fibonacci sequence
import time
print("Behold...")
time.sleep(2)
print("Pumbas fantastic fibonacci sequence!")
time.sleep(2)

# Print the first 29 Fibonacci numbers (0, 1, 1, 2, 3, ...), one every 0.5 s.
x = 0        # current Fibonacci number
x_inc = 1    # next Fibonacci number
counter = 1
while counter < 30:
    print(x)
    counter += 1
    # Advance the pair; tuple assignment replaces the x_hold temporary.
    x, x_inc = x_inc, x + x_inc
    time.sleep(0.5)
| [
"martin.takle@gmail.com"
] | martin.takle@gmail.com |
5ca9fcb65dd52ac01b9cea42017d84e0548f5053 | 66b98283032a9214e758fe851f18e82304406ac0 | /PersonalStylist/settings.py | 000bea7d62e1f349da05bcf8d9b629b7f13bd46d | [] | no_license | Stuti-Saha12/Personal-Stylist | d80f8eae65d695f25bc899f609714e2f21e934b2 | ae1e38f17e8b3231f1296006db4842165ad58e3d | refs/heads/master | 2023-03-30T20:23:18.391323 | 2021-04-08T08:29:04 | 2021-04-08T08:29:04 | 355,810,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,567 | py | """
Django settings for PersonalStylist project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 'g6-8!vqt2d@3g6%xdtj_r5f8sjctm(k9fz9u(1#+6cm4*9t+5e'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Outfits',
    'rest_framework',
    'crispy_forms',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'PersonalStylist.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]

WSGI_APPLICATION = 'PersonalStylist.wsgi.application'

# Database
# NOTE(review): credentials are hard-coded; move them to the environment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'outfitsdb',
        'USER': 'postgres',
        'PASSWORD': 'aswert',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static and media files
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# Where to send users after login/logout.
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
CRISPY_TEMPLATE_PACK = 'bootstrap4' | [
"b519054@iiit-bh.ac.in"
] | b519054@iiit-bh.ac.in |
4164bb650ac1f2b89c7848b186d58aea4fd000c5 | 81814509532f00af2a821258235882a50e31bde7 | /tests/encoders/text/tfhub/test_bert.py | ad7f6223fdcce21e207944cf459f76a64ffcda16 | [
"Apache-2.0"
] | permissive | dsp6414/vectorhub | 8639bdea4b3ecef1b2eb94c5bd60bd373205db25 | aac42978e90b8fb2433ce2966e3bd422e7c45319 | refs/heads/main | 2023-02-04T09:55:40.174943 | 2020-12-23T16:47:00 | 2020-12-23T16:47:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from vectorhub.encoders.text.tfhub import Bert2Vec
import numpy as np
def test_bert_encode():
    """Bert2Vec should embed a single word into a 1024-dim vector."""
    client = Bert2Vec()
    result = client.encode('Cat')
    assert np.array(result).shape == (1024,)
| [
"jacky.koh@vylar.org"
] | jacky.koh@vylar.org |
ec7fd252a95b77db53b5062b981abcbb49d101aa | 4bae4a5ff3089ea478a536c6f9d10803f0a1058b | /manage.py | 9a5a83a4199a701fff98fe0afa24fde7ab5e8ad9 | [] | no_license | mohamed-amine-maaroufi/social-network-django | db6a36bbb6519361ed5b44f5f91ec1f6615a9970 | 4c8fe63a7060427c172b7cede4c5a44258de10d1 | refs/heads/master | 2021-08-28T18:26:05.297170 | 2017-12-12T23:23:17 | 2017-12-12T23:23:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default to the project settings module unless the caller overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ArticleVF.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"aminemaaroufi40@gmail.com"
] | aminemaaroufi40@gmail.com |
6a6afc43a4b884765203d1c136ab7a64752ee595 | 44116b4dbc69046d5947ec7087be9eb87303e211 | /src/tact/server/websocket.py | 1db591283c587148c660788ee47bee65cf843d14 | [
"MIT"
] | permissive | wabain/tact | 76ae0258069ca9f84ee0382dd22268fadabf5acb | bd95608bebc640e47f31f6d0a403108fe998188d | refs/heads/master | 2020-04-13T04:02:20.404999 | 2019-07-07T21:42:56 | 2019-07-07T21:42:56 | 162,949,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | """Base definitions for the websocket interface"""
import json
from abc import ABC, abstractmethod
class WebsocketConnectionLost(Exception):
    """Exception signalling a lost websocket connection."""
class AbstractWSManager(ABC):
    """Interface for sending messages over managed websocket connections."""

    @abstractmethod
    async def send(self, conn_id: str, msg: dict) -> None:
        """Serialize *msg* as compact JSON and hand it to the transport.

        Kept separate from ``_send_serialized`` primarily for test
        convenience; overrides may delegate back via ``super().send``.
        """
        serialized = json.dumps(msg, separators=(',', ':'))
        await self._send_serialized(conn_id, serialized)

    @abstractmethod
    async def _send_serialized(self, conn_id: str, msg: str) -> None:
        """Transport-level send of an already-serialized payload."""
        raise NotImplementedError

    @abstractmethod
    async def close(self, conn_id: str):
        """Close the connection identified by *conn_id*."""
        raise NotImplementedError
| [
"bain.william.a@gmail.com"
] | bain.william.a@gmail.com |
bda8328e8e1c9a1ef0fde6461ed58bd0cae16a37 | 588915c92a1c546293e251db05720016b97a1d3e | /tests/__init__.py | a3283795fd07468739ce30dce3c8db5e01780a0c | [
"MIT"
] | permissive | ielia/prtg-py | 1ec9f316e98c914b493b4b881d1ef9a792f76454 | 438087b68e5f26d3517186580575199f16527c62 | refs/heads/master | 2021-01-10T18:42:16.556033 | 2015-05-21T21:42:38 | 2015-05-21T21:42:38 | 31,963,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | __author__ = 'kevinschoon'
| [
"elia.ignacio@gmail.com"
] | elia.ignacio@gmail.com |
37a6ef549d519244c685812adcf0bc114fca9be2 | ebe9f5c8196ec0a381542c05f8d6a13c24b99e98 | /test/test_v.py | 80ebe1c2cac4ddf1b69b1a8ec012a5a2145bed58 | [] | no_license | Ferdi265/subleq-verilog | dfabfefeb27bf970796273e8194f8cdda44d1fed | c66c34be30dbc12b4b4b6df609638d9ecb3d4538 | refs/heads/master | 2023-05-26T13:56:44.756669 | 2023-05-10T11:48:57 | 2023-05-10T11:48:57 | 230,788,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import sys
import os.path
from subprocess import check_output
from hlsubleq.sim import *
from hlsubleq.hlasm import *
import test_hl
subleq_dir = os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
class VSimAssembler(SimAssembler):
    """Assembler backend that simulates by running the Verilog model."""

    def simulate(self, n=None):
        """Run the external 'subleq' simulator over the current memory.

        Dumps memory (64K 16-bit words, hex, one per line) to memory.hex,
        invokes the simulator in autotest mode, then parses its memory
        dump (lines of "addr: e0 e1 ... e15") back into this assembler.
        """
        with open(subleq_dir + "/memory.hex", "w") as f:
            for i in range(0x10000):
                f.write("{:04x}\n".format(self.get_addr(i)))
        dump = check_output([subleq_dir + "/subleq", "+autotest"], cwd=subleq_dir).decode()
        i = 0
        for line in dump.split("\n")[:-1]:
            # Skip the simulator's $finish status line.
            if "$finish" in line:
                continue
            addr, entries = line.split(": ")
            for j, entry in enumerate(entries.split(" ")):
                self.set_addr(i + j, int(entry, 16))
            i += 16
        # Progress marker: one dot per completed simulation run.
        sys.stdout.write(".")
        sys.stdout.flush()


# Register the assembler pipeline used by the shared high-level test suite.
test_hl.Asm = create_assembler([HlAssembler, VSimAssembler, StringInputAssembler, Assembler])
| [
"theferdi265@gmail.com"
] | theferdi265@gmail.com |
5e1db68f4a4076d21071d6c0ef6884c57ef81d51 | e2e3982c626b49d57c51b5abafc0a0eb20915e6a | /factorization-machines/lightFM_plots.py | dd52379ce7229a1e23b79d35d12407e49dcbc859 | [] | no_license | mindis/thesis | ee683869627e54b620c8582c8365205e0b5fd424 | 3676aa32be4f115ea0c79448ee6391d3386d496d | refs/heads/master | 2022-04-25T22:55:51.846029 | 2020-04-27T18:39:11 | 2020-04-27T18:39:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,315 | py | import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from lightfm import LightFM
from lightfm import cross_validation
from lightfm.datasets import fetch_movielens
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from lightfm.evaluation import precision_at_k
from lightfm.evaluation import recall_at_k
from lightfm.evaluation import reciprocal_rank
from lightfm.evaluation import auc_score
def create_sparse_matrix(data, user_key='user_id', item_key='product_id'):
    """Build an item x user CSR rating matrix from an interactions frame.

    *data* must contain *user_key*, *item_key* and a ``rating`` column.
    NOTE: *data* is modified in place (categorical casts plus the added
    integer code columns ``user`` and ``item``).

    Returns:
        tuple: (csr_matrix of shape (n_items, n_users) holding the
        ratings, user lookup frame indexed by *user_key* mapping back to
        the integer code, item lookup frame mapping item code to
        *item_key*).
    """
    # Categorical codes give dense 0..n-1 integer ids for users and items.
    data[user_key] = data[user_key].astype("category")
    data[item_key] = data[item_key].astype("category")
    data['user'] = data[user_key].cat.codes
    data['item'] = data[item_key].cat.codes

    # Lookup frames so the original ids can be recovered in readable form
    # later (codes stored as strings, matching the original behaviour).
    user_lookup = data[['user', user_key]].drop_duplicates()
    user_lookup['user'] = user_lookup.user.astype(str)
    user_lookup = pd.DataFrame(user_lookup)
    user_lookup.set_index(user_key, inplace=True)
    item_lookup = data[['item', item_key]].drop_duplicates()
    item_lookup['item'] = item_lookup.item.astype(str)

    # Sorted unique code lists fix the matrix dimensions.
    users = list(np.sort(data.user.unique()))
    items = list(np.sort(data.item.unique()))
    actions = list(data.rating)

    # Rows are items, columns are users.
    rows = data.user.astype(int)
    cols = data.item.astype(int)
    data_sparse_new = csr_matrix((actions, (cols, rows)),
                                 shape=(len(items), len(users)))
    return data_sparse_new, user_lookup, item_lookup
if __name__ == '__main__':
    # NOTE(review): paths are machine-specific -- TODO make them CLI args.
    #TRAIN_PATH = '/home/nick/Desktop/thesis/datasets/cosmetics-shop-data/implicit-data/implicit_ratings_thr5.csv'
    TRAIN_PATH = '/home/nick/Desktop/thesis/datasets/pharmacy-data/ratings-data/user_product_ratings.csv'
    # TEST_PATH = '/home/nick/Desktop/thesis/datasets/cosmetics-shop-data/implicit-data/implicit_feedback_testdata.csv'
    traindata = pd.read_csv(TRAIN_PATH)
    print(traindata)
    print('\n')
    # print(testdata)
    user_key = 'user_id'
    item_key = 'product_id'
    csr_data1, user_lookup1, item_lookup1 = create_sparse_matrix(traindata, user_key, item_key)
    # create_sparse_matrix returns items x users, so transpose to users x items
    # as expected by LightFM.
    user_items_train = csr_data1.T.tocsr()
    print(user_items_train)
    print('\n')
    print(user_items_train.shape)
    # print(user_items_test.shape)
    print("Splitting the data into train/test set...\n")
    train, test = cross_validation.random_train_test_split(user_items_train)
    # Shared hyperparameters for both models.
    alpha = 1e-05
    epochs = 50
    num_components = 32
    warp_model = LightFM(no_components=num_components,
                        loss='warp',
                        learning_schedule='adagrad',
                        max_sampled=100,
                        user_alpha=alpha,
                        item_alpha=alpha)
    bpr_model = LightFM(no_components=num_components,
                    loss='bpr',
                    learning_schedule='adagrad',
                    user_alpha=alpha,
                    item_alpha=alpha)
    warp_duration = []
    bpr_duration = []
    warp_auc = []
    bpr_auc = []
    print("Start Training...\n")
    # Train one epoch at a time so per-epoch AUC can be recorded.
    for epoch in range(epochs):
        start = time.time()
        warp_model.fit_partial(train, epochs=1)
        warp_duration.append(time.time() - start)
        warp_auc.append(auc_score(warp_model, test, train_interactions=train).mean())
    for epoch in range(epochs):
        start = time.time()
        bpr_model.fit_partial(train, epochs=1)
        bpr_duration.append(time.time() - start)
        bpr_auc.append(auc_score(bpr_model, test, train_interactions=train).mean())
    # Plot the two AUC learning curves against epoch number.
    x = np.arange(epochs)
    plt.plot(x, np.array(warp_auc))
    plt.plot(x, np.array(bpr_auc))
    plt.legend(['WARP AUC', 'BPR AUC'], loc='upper right')
    plt.show()
| [
"nick_gianno@hotmail.com"
] | nick_gianno@hotmail.com |
f90b85d5e509620f16028f193e78c9d441e42ff3 | e2f525606d213767e85de9f55df236b7a21ea237 | /code/class_ui_rankwindow.py | 7440d7108cb1f6201a331ccbf8f384fad7f6da41 | [] | no_license | ricew4ng/Slim-Typer-v1.0 | b975697fd5d0fcd719c25fb61a80f69d244e4ed4 | 9b066770457027f0a5ad1476d525bad156d8e925 | refs/heads/master | 2022-10-27T07:39:27.420377 | 2018-09-23T02:12:20 | 2018-09-23T02:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,170 | py | #coding:utf8
# "本地记录"这个窗口的类脚本
from PyQt5 import QtWidgets
from ui_rankwindow import Ui_rankwindow
class ui_rankwindow(QtWidgets.QWidget,Ui_rankwindow):
    """Window showing the locally stored typing records ("local records")."""

    def __init__(self,mainwindow):
        # mainwindow: the QMainWindow whose menu action opens this window.
        super(ui_rankwindow,self).__init__()
        self.setupUi(self)  # build widgets from the generated .ui class
        self.radio_blank.hide()
        self.mainwindow = mainwindow
        # Record files are named '<rank_model><rank_degree>.txt'.
        self.rank_model = 'ch_word_'  # selected mode (file-name prefix)
        self.rank_degree = ''  # selected difficulty (file-name suffix)
        # Re-read the mode whenever the combo box selection changes.
        self.cbbox_model.currentTextChanged.connect(self.change_model)
        # Re-read the difficulty whenever a radio button is clicked.
        self.radio_low.clicked.connect(self.change_degree)
        self.radio_mid.clicked.connect(self.change_degree)
        self.radio_high.clicked.connect(self.change_degree)
        self.radio_blank.clicked.connect(self.change_degree)
        # The menu bar's offline-records action opens this window.
        self.mainwindow.action_offline_rank.triggered.connect(self.show)

    def change_model(self):
        """Update self.rank_model from the combo box selection."""
        current_text = self.cbbox_model.currentText()
        if current_text == '中文词语模式':
            self.rank_model = 'ch_word_'
        elif current_text == '中文句子模式':
            self.rank_model = 'ch_sentence_'
        elif current_text == '英文单词模式':
            self.rank_model = 'en_word_'
        elif current_text == '英文句子模式':
            self.rank_model = 'en_sentence_'
        # Whenever the mode changes, reset the difficulty selection to the
        # hidden "blank" button and clear all displayed records.
        self.radio_blank.setChecked(True)
        for i in range(1,6):
            self.set_blank(i)

    def change_degree(self):
        """Update self.rank_degree from the radio buttons, then reload."""
        if self.radio_low.isChecked():
            self.rank_degree = 'low'
        elif self.radio_mid.isChecked():
            self.rank_degree = 'mid'
        elif self.radio_high.isChecked():
            for_blank = None  # placeholder branch marker (unused)
            self.rank_degree = 'high'
        elif self.radio_blank.isChecked():
            for i in range(1,6):
                self.set_blank(i)
            return
        # A real difficulty was chosen, so fetch and display its records.
        self.get_rank()

    def get_rank(self):
        """Load the record file for the current mode/difficulty and show it."""
        try:
            # The two selections combine into the record file to load.
            file_path = './source/rank/'+self.rank_model+self.rank_degree+'.txt'
            # NOTE(review): the file was written on Windows without an explicit
            # utf-8 encoding; passing encoding='utf8' broke there, so the
            # platform default encoding is kept -- verify behaviour on Linux.
            # Bug fix: use 'with' so the handle is closed (it used to leak
            # on every call).
            with open(file_path,'r') as f:
                i = 1
                for line in f:
                    # Each line is 'time speed timestamp', space separated.
                    line = line.replace('\n','')
                    data = line.split(' ')
                    self.set_rank(i,data)
                    i+=1
                # Blank out any of the five rows that had no stored record.
                while i <= 5:
                    self.set_blank(i)
                    i+=1
        except Exception as e:
            print('rank open error')
            raise e

    def set_rank(self,num,data):
        """Show one record row.

        num  -- 1-based row index (1..5)
        data -- [elapsed time, finishing speed, timestamp of the record]
        """
        if num == 1:
            self.rank_time_set1.setText(data[0])
            self.rank_speed_set1.setText(data[1])
            self.rank_marktime_set1.setText(data[2])
        elif num == 2:
            self.rank_time_set2.setText(data[0])
            self.rank_speed_set2.setText(data[1])
            self.rank_marktime_set2.setText(data[2])
        elif num == 3:
            self.rank_time_set3.setText(data[0])
            self.rank_speed_set3.setText(data[1])
            self.rank_marktime_set3.setText(data[2])
        elif num == 4:
            self.rank_time_set4.setText(data[0])
            self.rank_speed_set4.setText(data[1])
            self.rank_marktime_set4.setText(data[2])
        elif num == 5:
            self.rank_time_set5.setText(data[0])
            self.rank_speed_set5.setText(data[1])
            self.rank_marktime_set5.setText(data[2])

    def set_blank(self,num):
        """Clear record row *num* (1-based int row index)."""
        if num == 1:
            self.rank_time_set1.setText('')
            self.rank_speed_set1.setText(' 无')
            self.rank_marktime_set1.setText('')
        elif num == 2:
            self.rank_time_set2.setText('')
            self.rank_speed_set2.setText(' 无')
            self.rank_marktime_set2.setText('')
        elif num == 3:
            self.rank_time_set3.setText('')
            self.rank_speed_set3.setText(' 无')
            self.rank_marktime_set3.setText('')
        elif num == 4:
            self.rank_time_set4.setText('')
            self.rank_speed_set4.setText(' 无')
            self.rank_marktime_set4.setText('')
        elif num == 5:
            self.rank_time_set5.setText('')
            self.rank_speed_set5.setText(' 无')
            self.rank_marktime_set5.setText('')

    def load_qss(self,qss_path):
        """Read a qss stylesheet file and return its text ('' on failure)."""
        stylesheet = ''
        try:
            # Bug fix: 'with' guarantees the file handle is closed (the
            # original leaked it), and the narrow except no longer hides
            # unrelated programming errors.
            with open(qss_path,'r') as file:
                for line in file:
                    stylesheet+=line
        except OSError:
            print('qss improt error')
        return stylesheet

    def show(self):
        """Clear stale rows, apply the qss style, then show the window."""
        for i in range(1,6):
            self.set_blank(i)
        stylesheet = self.load_qss('./qss/rank_qss.txt')
        self.setStyleSheet(stylesheet)
        super().show()

    def hide(self):
        """Reset the difficulty selection to blank before hiding."""
        self.radio_blank.setChecked(True)
        super().hide()
| [
"noreply@github.com"
] | noreply@github.com |
66c6106ce862911772a432abc48e73209d3f4d47 | ca00910013499f5f3c8e970fc9379ddc74c23ddb | /bin/pip | d78922611a3bdf12114c6e1484a6884a87b93955 | [] | no_license | saposki/saposki7 | 71feb479bf39614d91780f858d90ad776704f0ff | a1c67c4423f2ef80ce3a8a02dc7cccd143bba8b8 | refs/heads/master | 2020-06-04T14:54:38.367872 | 2015-01-20T09:34:45 | 2015-01-20T09:34:45 | 29,083,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | #!/Users/blackrob/Dropbox/saposki_flask/sf/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main

if __name__ == '__main__':
    # setuptools console-script shim: strip the '-script.pyw'/'.exe' wrapper
    # suffix from argv[0] so pip sees its plain program name, then delegate.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"saposki@gmail.com"
] | saposki@gmail.com | |
13304ad34c9181779d72a2811439ff96eabc20cf | f8201014d20832d4cc217b473500501cf16df8ba | /virtool/genbank.py | 7035b74b89e201906c6cfa858afebbf05f253176 | [
"MIT"
] | permissive | gitter-badger/virtool | abc996ef8dc160f1fe879a55d6eec4e9043c9840 | 628acc377fb0497c2bfe75e9fa0a61decc59e0e6 | refs/heads/master | 2020-04-23T04:47:02.186926 | 2019-02-15T03:01:12 | 2019-02-15T03:01:12 | 170,919,108 | 0 | 0 | null | 2019-02-15T19:42:26 | 2019-02-15T19:42:25 | null | UTF-8 | Python | false | false | 1,933 | py | import logging
import string
import virtool.http.proxy
logger = logging.getLogger(__name__)
EMAIL = "dev@virtool.ca"
TOOL = "virtool"
FETCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
async def fetch(settings, session, accession):
    """
    Fetch the Genbank record for the passed `accession`.

    Returns ``None`` when NCBI reports the accession could not be retrieved
    or any other non-200 response is received.

    :param settings: the application settings object
    :type settings: :class:`virtool.app_settings.Settings`

    :param session: an aiohttp client session
    :type session: :class:`aiohttp.ClientSession`

    :param accession: the accession to fetch
    :type accession: Union[int,str]

    :return: parsed Genbank data
    :rtype: dict

    """
    # Standard NCBI efetch query parameters; 'gb' requests the flat-file
    # Genbank format parsed line-by-line below.
    params = {
        "db": "nuccore",
        "email": EMAIL,
        "id": accession,
        "retmode": "text",
        "rettype": "gb",
        "tool": TOOL
    }

    async with virtool.http.proxy.ProxyRequest(settings, session.get, FETCH_URL, params=params) as resp:

        body = await resp.text()

        if resp.status != 200:
            # "Failed to retrieve sequence" is the expected not-found reply;
            # anything else is logged as unexpected.
            if "Failed to retrieve sequence" not in body:
                logger.warning("Unexpected Genbank error: {}".format(body))

            return None

        data = {
            "host": ""
        }

        # Pull the accession, definition and host fields from the header
        # lines of the flat file.
        for line in body.split("\n"):

            if line.startswith("VERSION"):
                data["accession"] = line.replace("VERSION", "").lstrip(" ")

            if line.startswith("DEFINITION"):
                data["definition"] = line.replace("DEFINITION", "").lstrip(" ")

            if "/host=" in line:
                data["host"] = line.lstrip(" ").replace("/host=", "").replace('"', "")

        # Extract sequence: everything after ORIGIN, with layout characters
        # and line-offset digits stripped out.
        sequence_field = body.split("ORIGIN")[1].lower()

        for char in [" ", "/", "\n"] + list(string.digits):
            sequence_field = sequence_field.replace(char, "")

        data["sequence"] = sequence_field.upper()

        return data
| [
"igboyes@gmail.com"
] | igboyes@gmail.com |
fc563352b05665f7e7e956ba37bb51637d95c6b5 | bb4ddec71c91d64bda34f2002bcea3e7639b45f2 | /03-services-redis/main.py | b32d10a62769eaaa0be52eb7c1766b4c2ce68619 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | alexwillzhj/python-machine-learning-api | 4dc0ae759694a59fa0b478c08a646fee3407098b | 917c4ad08f8fe8ba1ada4e71eb708149b1fe7dc1 | refs/heads/master | 2021-01-20T13:58:10.603821 | 2017-05-08T01:00:43 | 2017-05-08T01:00:43 | 90,543,495 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | """
A simple app to show redis service integration.
Author: Ian Huston
License: See LICENSE.txt
"""
from flask import Flask
import os
import redis
import json
app = Flask(__name__)

# Get port from environment variable or choose 9099 as local default
port = int(os.getenv("PORT", 9099))

# Get Redis credentials: on Cloud Foundry they arrive in the VCAP_SERVICES
# JSON blob; locally fall back to a default localhost configuration.
if 'VCAP_SERVICES' in os.environ:
    services = json.loads(os.getenv('VCAP_SERVICES'))
    redis_env = services['rediscloud'][0]['credentials']
else:
    redis_env = dict(hostname='localhost', port=6379, password='')

# StrictRedis expects 'host', not 'hostname', and an int port.
redis_env['host'] = redis_env['hostname']
del redis_env['hostname']
redis_env['port'] = int(redis_env['port'])

# Connect to redis; r stays None when unreachable and the routes below
# degrade gracefully (r.info() forces an immediate connection attempt).
try:
    r = redis.StrictRedis(**redis_env)
    r.info()
except redis.ConnectionError:
    r = None
@app.route('/')
def keys():
    """Bump and show the hit counter plus all keys currently in Redis."""
    if r:
        current_hits = r.incr('hits')
        return 'Hits: {}\n'.format(current_hits) + 'Available Keys: ' + str(r.keys())
    else:
        return 'No Redis connection available!'
@app.route('/<key>')
def get_current_values(key):
    """Return every element of the Redis list stored under *key*."""
    if r:
        current_values = r.lrange(key, 0, -1)
        return str(current_values)
    else:
        # Bug fix: `abort` was never imported (only Flask is imported at the
        # top of the file), so this branch crashed with NameError instead of
        # answering 503 Service Unavailable.
        from flask import abort
        abort(503)
@app.route('/<key>/<s>')
def add_value(key, s):
    """Append value *s* to the Redis list stored under *key*."""
    if r:
        r.rpush(key, s)
        return 'Added {} to {}.'.format(s, key)
    else:
        # Bug fix: `abort` was never imported at module level, so this branch
        # raised NameError instead of returning HTTP 503.
        from flask import abort
        abort(503)
if __name__ == '__main__':
# Run the app, listening on all IPs with our chosen port number
app.run(host='0.0.0.0', port=port)
| [
"alex.zhang@ge.com"
] | alex.zhang@ge.com |
fef3b2923673ec798f0c1f02da5775dc032ad1b8 | ac9b76ea5a3bd4618fbcadc4008b21174af6b913 | /TreeNode.py | 5194bafe672437cb0230593bcfcf1ba7d55b4153 | [] | no_license | olawesterlund/DecisionTree | 5c9040b4070f59b693c29a9199173f39198b9605 | bfd3be66a23eb24898fcbd34e57ba918ea254fd8 | refs/heads/master | 2022-03-15T16:36:35.074957 | 2018-03-19T09:32:23 | 2018-03-19T09:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | class TreeNode:
    def __init__(self,att,att_dict,classes,children=None,child_examples=None):
        """Create a decision-tree node.

        att            -- attribute this node splits on
        att_dict       -- mapping attribute -> list of its possible values
        classes        -- list of class labels (indexed by leaf value)
        children       -- child nodes/leaves, parallel to att_dict[att]
        child_examples -- training examples routed to each child
        """
        self.attribute = att
        self.children = children
        self.child_examples = child_examples
        self.att_dict = att_dict
        self.classes = classes
def add_child(self, node, list_index):
self.children.insert(list_index, node)
return 0
# add examples for a child to this node
def add_examples(self, examples, i):
self.child_examples.insert(i, examples)
    def print_node(self, indent, i):
        """Print 'attribute = value' for branch *i*, indented *indent* steps."""
        printstr = ""
        for s in range(0,indent):
            printstr += " "
        print(printstr,self.attribute, " = ", self.att_dict.get(self.attribute)[i])#, self.child_examples[i])
    def print_tree(self, level = None):
        """Recursively pretty-print the subtree rooted at this node.

        level -- current indentation depth; defaults to 0 at the root.
        Children stored as plain ints are leaves (indices into self.classes);
        anything else is a TreeNode printed recursively one level deeper.
        """
        if level is None:
            level = 0
        for child in range(0,len(self.children)):
            if type(self.children[child]) is int:
                self.print_leaf(level, child, self.children[child])
            else:
                self.print_node(level, child)
                self.children[child].print_tree(level+1)
    def print_leaf(self, level, i, val):
        """Print branch *i* with its class label (self.classes[val]) in caps."""
        printstr = ""
        for s in range(0,level):
            printstr += " "
        print(printstr,self.attribute, " = ", self.att_dict.get(self.attribute)[i], ": ", self.classes[val].upper())#, self.child_examples[i])
| [
"ola.oj.johansson@gmail.com"
] | ola.oj.johansson@gmail.com |
d631c815c2c1ba0870f891182e8369ce24c3be49 | 278060c3e3fce8c2d78640ac748188e80758deac | /tax_app/migrations/0002_auto_20191020_1607.py | d78e86314c315ed836c08685fd62b3ca35a1e8d3 | [] | no_license | ajisaq/BusinessTaxApp | 33507bb64cfabc4a84a56826db3ae90d55539359 | 08031f03a7018c59b2e9b0095e80a5ff0b7b0b70 | refs/heads/master | 2022-05-03T17:29:47.635710 | 2019-12-02T09:25:14 | 2019-12-02T09:25:14 | 219,758,403 | 1 | 3 | null | 2022-04-22T22:50:39 | 2019-11-05T13:59:07 | Python | UTF-8 | Python | false | false | 1,131 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-10-20 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Business_Category and Location tables and a
    required 'contact' field (backfilled with '9340-505') to Profile."""

    dependencies = [
        ('tax_app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Business_Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=15)),
                ('name', models.CharField(max_length=150)),
            ],
        ),
        migrations.AddField(
            model_name='profile',
            name='contact',
            field=models.CharField(default='9340-505', max_length=150),
            # One-off default only used to backfill existing rows.
            preserve_default=False,
        ),
    ]
| [
"mohammedaliyu136@gmail.com"
] | mohammedaliyu136@gmail.com |
bead62e20492a2819230be41a9c5f127aba73e03 | ff4fc971fd73ca30101589ea5152672905c71d05 | /django/portfolio/contact/migrations/0002_auto_20210101_0735.py | 3ae7836c59864c7e298c30f50217ef188c5e7236 | [] | no_license | farhadict17/django-vue-start | 41e5cc59c92af1c454904d6548aa2aed0867c235 | c9a3a8a91ca16f941843cb22ffdd0f6c0fba8a4c | refs/heads/master | 2023-02-10T08:21:26.152534 | 2021-01-11T02:57:52 | 2021-01-11T02:57:52 | 328,530,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Generated by Django 3.1.4 on 2021-01-01 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: converts Contact.name to a TextField(max_length=100)."""

    dependencies = [
        ('contact', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='name',
            field=models.TextField(max_length=100),
        ),
    ]
| [
"farhadict17@gmail.com"
] | farhadict17@gmail.com |
dd3ea63e4a978a236d5ad87b031ef9a80b39082c | af4e2d54714c9db7a1d11f0d1dee67ec8873cfdc | /Tetris_game.py | 28e77210398913a0f18165a6ef3aee12bd0d87e9 | [] | no_license | nastprol/tetris | 6cd34d354c47cd75ccee1ff3a1bb144d390b8504 | 4c562a20625d2084a22f67cd355b3eb52ad85dfe | refs/heads/master | 2020-04-02T11:11:16.315166 | 2018-10-25T16:49:13 | 2018-10-25T16:49:13 | 154,375,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from Board import Board
from PyQt5.QtWidgets import QLineEdit, QPushButton, QLabel, QSpinBox
class Form(QMainWindow):
    """Name-entry form shown before the game starts."""

    def __init__(self):
        super().__init__()
        self.resize(300, 300)
        self.name = ""  # filled in from the text box when START is clicked
        self.lbl_name = QLabel("Enter your name", self)
        self.lbl_name.move(20, 100)
        self.lbl_name.resize(480, 70)
        self.name_box = QLineEdit(self)
        self.name_box.move(20, 200)
        self.name_box.resize(480, 70)
        self.button = QPushButton('START', self)
        self.button.move(20, 300)
        self.button.resize(480, 70)
        self.button.clicked.connect(self.on_click)

    def on_click(self):
        """Grab the entered name, close the form and launch the game."""
        self.name = self.name_box.text()
        self.close()
        # kept on self so the Game window is not garbage-collected
        self.game = Game(self.name)
class Game(QMainWindow):
    """Main game window hosting the Tetris Board widget."""

    def __init__(self, name):
        # name: player name, forwarded to the Board (used for records).
        super().__init__()
        self.board = Board(name)
        self.setCentralWidget(self.board)
        self.statusbar = self.statusBar()
        # Board emits status[str]; mirror it in the window's status bar.
        self.board.status[str].connect(self.statusbar.showMessage)
        self.board.start()
        self.resize(1500, 1500)
        self.show()
class Start(QMainWindow):
    """Entry point window: owns and shows the name-entry Form."""

    def __init__(self):
        super().__init__()
        self.form = Form()
        self.form.setGeometry(100, 100, 2000, 1500)
        self.form.show()
if __name__ == '__main__':
app = QApplication([])
start = Start()
sys.exit(app.exec_())
| [
"nastprol@users.noreply.github.com"
] | nastprol@users.noreply.github.com |
30771aa74b83f38d6af9f40d00e581dbe58ccac6 | 01322837bc9e8fd74980b53f9b4f02867309588b | /Pytorch Story Net/Main.py | f110cb046e03921f37f5b27163d43a867ce03c0c | [] | no_license | JCSteiner/Directed-Study-Fall-2020 | fc39c382f0f96488fd6bd594925b349c0c3095f2 | 0ec90ecb834c8005129b11257bd2c34f16800b73 | refs/heads/master | 2022-12-29T18:43:50.601154 | 2020-10-20T11:52:15 | 2020-10-20T11:52:15 | 289,676,809 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | ###############################################################################
# #
# Main.py #
# J. Steiner #
# #
###############################################################################
#%%########################## IMPORT DEPENDENCIES #############################
#Imports helper functions from another file
from Helpers import *
#Imports the model structure
from Model import *
#Imports the dataloader
from DataLoader import *
#imports random functionality
import random
#%%############################ MODEL TRAINING ################################
#the number of words we want to generate
numWords = 200
#the number of epochs we want to start training from
epochStart = 0
#how much of our dataset we want to load
numRowsLoad = 5
#word2Idx dictionary
word2Idx = dict()
#idx2Word dictionary
idx2Word = dict()
#loads in our dataset
data = load('./Data/stories.csv', numRowsLoad, word2Idx, idx2Word)
#creates an instance of the model
model = Model(EMBEDDING_DIMS, HIDDEN_DIMS, len(word2Idx))
#trains the model
train(model, data, NUM_EPOCHS, LEARNING_RATE, epochStart, word2Idx)
#tries to load the state dict we specified
model.load_state_dict(torch.load("./States/state100"))
#prints the model output: seed with a random vocabulary word
word = random.choice(list(word2Idx.keys()))
line = runModel(model, word, numWords, word2Idx, idx2Word)
# 'with' guarantees the output file is closed even if a write fails
# (the original relied on an explicit close at the end).
with open('output.txt', 'w') as outFile:
    outFile.write('------------------------------------------------------------\n')
    # enumerate replaces manual indexing; the original's trailing `i += 1`
    # inside the for loop was a no-op (the loop variable is rebound each
    # iteration) and has been removed. Wrap the text every 20 words.
    for i, token in enumerate(line):
        outFile.write(token + " ")
        if i % 20 == 0:
            outFile.write(" \n ")
    outFile.write('------------------------------------------------------------\n')
"noreply@github.com"
] | noreply@github.com |
9f082efbc61644874f5ec6acf1a35d025d6bcbff | 263c8801cf5fc46270cde2a79af3029da0d2b682 | /wine_admin/wsgi.py | 6cdb09b1a0ee48086b3124d5ef59ec07f439335a | [] | no_license | shybeeJD/wine_admin | 1a6c465c326d3d519db000fb6b924deb3dd26397 | 87262be75fa7bcc8d48fbc21ea7b59cd95e13971 | refs/heads/master | 2023-08-24T03:31:44.176055 | 2021-10-04T14:25:08 | 2021-10-04T14:25:08 | 401,797,347 | 0 | 0 | null | 2021-09-03T16:33:33 | 2021-08-31T17:58:52 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for wine_admin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wine_admin.settings')

# Module-level WSGI callable discovered by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"1639938697@qq.com"
] | 1639938697@qq.com |
371b70b5199d49ec2db85f7e1ccd506400ea44d0 | c2ae65792af1fab2e7843303ef90790819f872e8 | /testing/ds/bin/jupyter-troubleshoot | 7134b7d07b8dd90f5c6d6f159e2fc0a8167a0183 | [] | no_license | behappyyoung/PythonSampleCodes | 47c224ca76ce509a03c8b75ef6b4bf7f49ebdd7f | f7640467273fa8ea3c7e443e798737ca5bcea6f9 | refs/heads/master | 2023-03-15T00:53:21.034605 | 2023-02-13T17:12:32 | 2023-02-13T17:12:32 | 26,919,763 | 3 | 3 | null | 2023-03-07T12:45:21 | 2014-11-20T15:57:16 | Python | UTF-8 | Python | false | false | 274 | #!/Users/s0199669/github/PythonSampleCodes/testing/ds/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from jupyter_core.troubleshoot import main

if __name__ == '__main__':
    # Console-script shim: strip the '-script.pyw?'/'.exe' wrapper suffix
    # from argv[0] so the tool sees its plain program name, then delegate.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"behappyyoung@gmail.com"
] | behappyyoung@gmail.com | |
e4987ae5f98f6ae7c5d175a1db16e518931677b5 | a3b327960e9435ea2193446b68cd0d5421b2d327 | /recursion/PrefixParserBase/main_program.py | 308fe1ea0db4b78d43058a0a5b4d0d03f8d3be66 | [] | no_license | Illugi317/gagnaskipan | b29063ad875d555a4d05d92b83a6047ddd4240fc | 3223106e8d68bbcc471e4653c2e96a34d7302f03 | refs/heads/master | 2023-03-29T18:38:42.479267 | 2021-03-22T19:08:09 | 2021-03-22T19:08:09 | 343,486,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import tokenizer
from tokenizer import Tokenizer
# throw/raise this exception if a
# division by zero occurs.
class DivisionByZero(Exception):
    """Raised when the parsed prefix expression divides by zero."""
    pass
# IMPLEMENT HERE!!!! -----------------------------------------------------
# This function is the actual recursive
# implementation of the prefix parser.
# The tokenizer is pass-by-object-reference
# so it will be the same instance of Tokenizer
# throughout the recursive run.
def prefix_parser_recursive(tokenizer):
    """Recursively evaluate the prefix expression read from *tokenizer*.

    NOTE(review): still the unfinished skeleton -- it consumes one token,
    prints it for debugging and always returns 0.  The real recursive
    evaluation (and raising DivisionByZero) remains to be implemented.
    The same Tokenizer instance is shared across the whole recursion.
    """
    token = tokenizer.get_next_token()
    print(token) # debug lines
    return 0
# This function makes the tokenizer
# and then calls the recursive function.
# It is often necessary to make a separate
# recursive function that takes and returns
# particular values that don't match the
# proper functions parameters and return value.
def prefix_parser(str_statement):
    """Tokenize *str_statement* and hand it to the recursive evaluator."""
    return prefix_parser_recursive(Tokenizer(str_statement))
# This is a tester function to test that
# the output and/or error message from the
# prefix_parser function are correct.
def test_prefix_parser(str_statement):
    """Evaluate one statement and print either its result or the error.

    Prints '<stmt> = <value>' on success, or an explanatory message when
    the expression divides by zero.
    """
    str_print = str_statement.rstrip()
    try:
        str_print += " = " + str(prefix_parser(str_statement))
    except DivisionByZero:
        # the original wrapped this literal in a redundant str() call
        str_print += ": A division by zero occurred"
    print(str_print)
# A few hard coded tests
test_prefix_parser("+ 3 20")
test_prefix_parser("+ / 3 - 21 20 * 2 40")
test_prefix_parser("+ / 3 - 20 20 * 2 40")

# This must be a relative path from the folder that you have open
# in Visual Studio Code. This particular path works if you open
# the folder PrefixParserBase directly.
# 'with' closes the file even if a parse raises; the original's explicit
# f.close() was only reached when every statement parsed successfully.
with open("prefix_statements.txt", "r") as f:
    for line in f:
        test_prefix_parser(line)
| [
"illugi@illugi.com"
] | illugi@illugi.com |
d6f3ef2b5af85a6b9cb9421677956bf5484c8549 | a155e08f55bad461bd6f5de7e18aac93b298e2b7 | /work-at-olist/channels/tests/test_utils.py | 8af3e0bb3bb1107d05e4c9b10741fa56b32c236b | [] | no_license | joaorafaelm/work-at-olist | dbc054430371a4d2ca02e3e6bd86925e4c345b7c | 6cef733dd0b507f776f26bbaeb6bd2b401f0b612 | refs/heads/master | 2021-08-17T16:46:09.094861 | 2017-10-05T21:28:45 | 2017-10-05T21:28:45 | 96,346,170 | 0 | 0 | null | 2017-12-24T20:16:37 | 2017-07-05T17:49:04 | Python | UTF-8 | Python | false | false | 735 | py | """Test file for the utils functions and classes."""
from channels.utils import Attrgetter
from django.test import TestCase
class TestUtils(TestCase):
    """Tests for the utils functions and classes."""

    def test_attrgetter(self):
        """Test if the Attrgetter class retrieves attributes correctly."""
        # Throwaway object with two attributes; Attrgetter is expected to
        # behave like operator.attrgetter (single name -> value, several
        # names -> tuple, non-string argument -> TypeError).
        obj = type('Dummy', (object,), {'attr1': 'something', 'attr2': 322})
        self.assertEqual(Attrgetter('attr1')(obj), getattr(obj, 'attr1'))
        self.assertEqual(
            Attrgetter('attr1', 'attr2')(obj),
            tuple([
                getattr(obj, 'attr1'),
                getattr(obj, 'attr2')
            ])
        )
        with self.assertRaises(TypeError):
            Attrgetter({})(obj)
| [
"joaoraf@me.com"
] | joaoraf@me.com |
3571c8cc983bb908e5fefc686b7dd1d85062152c | 530201d1bf8370a94ddf6ffcffd0c256389b42c9 | /mazeclass.py | 9d240b9505411691b0fd735472fb78dd60b9e784 | [] | no_license | chefakshito/cs520 | 1169a714c1e93bfb546df62b71662ff307a8de98 | 97b81f619e6f54f5125d14b58f04faa325227bd1 | refs/heads/master | 2021-01-21T06:39:35.828236 | 2017-02-27T04:22:37 | 2017-02-27T04:22:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,431 | py | from random import randint
from PIL import Image
# Output image size used by the (currently commented-out) maze painter.
imgx = 500; imgy = 500
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
# wall / corridor colours for painting
color = [(0,0, 0), (255, 255, 255)]
# maze grid dimensions (cells)
sx=101
sy=101;
# number of mazes to pre-generate
nm=50;
# nm mazes of sy x sx cells; 0 = wall, 1 = carved corridor
maze = [[[0 for x in range(sx)] for y in range(sy)] for z in range(nm)]
# x/y deltas for the four neighbour directions (up, right, down, left)
dx=[0,1,0,-1]
dy=[-1,0,1,0]
"""
cx=randint(0,mx-1)
cy=randint(0,my-1)
stack.append((cx,cy))
print(stack)
"""
# per-maze start and goal states, filled in by mazeClass.__init__
sState=[]
gState=[]
class mazeClass:
    """Generates `nm` random mazes into the module-level grids on creation."""

    def __init__(self):
        """Carve all nm mazes (randomized DFS with occasional random moves)
        and record a start state and a reachable goal state for each."""
        global imgx; global imgy;
        global image;
        global pixels;
        global color;
        global sx
        global sy
        global maze
        global dx
        global dy
        global nm;
        for x in range(nm):
            stack = [(randint(0, sx - 1),randint(0, sy - 1))]
            sState.append(stack[-1]) #The start state is assigned.
            while len(stack) > 0:
                (cx, cy) = stack[-1];
                maze[x][cy][cx] = 1
                # find a new cell to add
                nlst = [] # list of available neighbors
                for i in range(4):
                    # roughly half the time use the strict carving rule
                    # (choice == 1); otherwise use a random acceptance roll.
                    ch = randint(0,11)
                    if ch<6:
                        choice=1
                    else:
                        choice=randint(0,11)
                    nx = cx + dx[i]; ny = cy + dy[i]
                    if nx >= 0 and nx < sx and ny >= 0 and ny < sy:
                        if maze[x][ny][nx] == 0:
                            if choice==1:
                                # strict rule: number of occupied neighbors
                                # of the candidate cell must be exactly 1
                                ctr = 0
                                for j in range(4):
                                    ex = nx + dx[j]; ey = ny + dy[j]
                                    if ex >= 0 and ex < sx and ey >= 0 and ey < sy:
                                        if maze[x][ey][ex] == 1: ctr += 1
                                if ctr == 1: nlst.append(i)
                            if choice>1:
                                # loose rule: accept the neighbor with
                                # probability depending on the two rolls
                                luck=randint(1,11)
                                if luck>choice:
                                    nlst.append(i)
                # if 1 or more neighbors available then randomly select one and move
                if len(nlst) > 0:
                    ir = nlst[randint(0, len(nlst) - 1)]
                    cx += dx[ir]; cy += dy[ir]
                    stack.append((cx, cy))
                else: stack.pop()
            # A random goal state is generated (must land on a carved cell).
            while len(gState)!=x+1:
                gx=randint(0,sx-1)
                gy=randint(0,sy-1)
                if maze[x][gx][gy]==1:
                    gState.append((gx,gy))
            # # paint the maze
            # for ky in range(imgy):
            #     for kx in range(imgx):
            #         pixels[kx, ky] = color[maze[x][sy * ky // imgy][sx * kx // imgx]]
            # image.save("Maze_" + str(x) + ".png", "PNG")

    def getMaze(self):
        """Return one randomly chosen maze as (grid, index, start, goal)."""
        # Bug fix: random.randint's upper bound is INCLUSIVE, so the original
        # randint(0, 50) could return 50 and index past the end of the
        # nm == 50 generated mazes, raising IndexError.
        c = randint(0, nm - 1)
        return (maze[c], c, sState[c], gState[c])
| [
"="
] | = |
391a282e05a447f05ae685668d80e3ca37b0de3f | 1eb8426b9c4af7642ac1d7bfa0e7f9dd7fc6a619 | /copycat.py | d41684a2a391a9b7b50ce618aed2e93477d60ed4 | [
"Unlicense"
] | permissive | foshizzledawg/copycat | ec8841825443240bbc5c96dae9593a17e2e4ed74 | da9f440c229bde81a3c4c74a5366d964fb00398c | refs/heads/master | 2021-01-23T11:34:51.357148 | 2015-06-20T12:47:08 | 2015-06-20T12:47:08 | 29,200,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | from sys import argv
# Unpack argv: argv[0] is this script's name, argv[1] the file to echo.
script, filename = argv
# 'with' guarantees the file handle is closed (the original never closed it).
with open(filename) as target:
    print(target.read())
| [
"foshizzledawg@users.noreply.github.com"
] | foshizzledawg@users.noreply.github.com |
4983aad5ddb2fb70df592f1c4dd4d89dfd244507 | 30cfcde7ef63dbd879568ac5088ea82319aa5093 | /cantons_districts_cr/models/canton.py | 2987ce8b6a283b76293c08dfe12f1c905f119e06 | [] | no_license | gabriumaa/OdooCV | 6cccc1ed7173c2385fa6bb0d6192fcfcc2cb03da | 9a9e06b30cdc5908d953c467fd6a5d64eeaf2bc8 | refs/heads/master | 2020-04-03T23:13:28.532005 | 2019-06-14T20:46:40 | 2019-06-14T20:46:40 | 155,623,222 | 0 | 1 | null | 2019-03-28T22:07:17 | 2018-10-31T21:04:48 | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class Canton(models.Model):
    # Odoo model for a Costa Rican canton, linked to its province
    # (res.country.state) and country; both links are delete-protected.
    _name = "res.country.state.canton"
    _description = "Cantón"

    # human-readable canton name
    name = fields.Char(string="Nombre cantón", required=True)
    # short official canton code (max 4 chars)
    code = fields.Char(string="Código de cantón", size=4, required=True)
    # parent province; the domain restricts choices to the selected country
    state_id = fields.Many2one(comodel_name="res.country.state", string="Provincia", domain="[('country_id','=',country_id)]", required=True, ondelete="restrict")
    country_id = fields.Many2one(comodel_name="res.country", string="País", required=True, ondelete="restrict")
| [
"fmelendezg@ice.go.cr"
] | fmelendezg@ice.go.cr |
4f86ca68d750e927852514244aa44a8cc386ccfc | a5d6167ec3cd70c5c9e2e1dde209478ced4c625f | /neural_testbed/agents/factories/ensemble.py | 5193d3e19a5e7b2dde9811eeae63cd684d65debf | [
"Apache-2.0"
] | permissive | Aakanksha-Rana/neural_testbed | 914eafa443f1b34980d73a929ed858a6a3b7009d | cc2e3de49c29f29852c8cd5885ab54fb6e664e2e | refs/heads/master | 2023-08-10T15:57:05.926485 | 2021-10-12T16:50:49 | 2021-10-12T16:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | # python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods for ensemble agent."""
import dataclasses
from typing import Sequence
from enn import base as enn_base
from enn import losses
from enn import networks
from neural_testbed import base as testbed_base
from neural_testbed.agents import enn_agent
from neural_testbed.agents.factories import base as factories_base
import numpy as np
@dataclass
class VanillaEnsembleConfig:
  """Hyperparameters for the vanilla deep-ensemble agent."""
  num_ensemble: int = 100  # Size of ensemble
  l2_weight_decay: float = 1.  # Weight decay
  adaptive_weight_scale: bool = True  # Whether to scale with prior
  hidden_sizes: Sequence[int] = (50, 50)  # Hidden sizes for the neural network
  num_batches: int = 1000  # Number of SGD steps
  seed: int = 0  # Initialization seed
def make_agent(config: VanillaEnsembleConfig) -> enn_agent.VanillaEnnAgent:
  """Factory method to create a vanilla ensemble.

  Args:
    config: hyperparameters for the ensemble (size, weight decay, etc.).

  Returns:
    A VanillaEnnAgent wiring together the ensemble ENN and its loss.
  """

  def make_enn(prior: testbed_base.PriorKnowledge) -> enn_base.EpistemicNetwork:
    # MLP ensemble whose output layer width matches the number of classes.
    return networks.make_einsum_ensemble_mlp_enn(
        output_sizes=list(config.hidden_sizes) + [prior.num_classes],
        num_ensemble=config.num_ensemble,
        nonzero_bias=False,
    )

  def make_loss(prior: testbed_base.PriorKnowledge,
                enn: enn_base.EpistemicNetwork) -> enn_base.LossFn:
    del enn
    single_loss = losses.combine_single_index_losses_as_metric(
        # This is the loss you are training on.
        train_loss=losses.XentLoss(prior.num_classes),
        # We will also log the accuracy in classification.
        extra_losses={'acc': losses.AccuracyErrorLoss(prior.num_classes)},
    )

    # Averaging over index
    loss_fn = losses.average_single_index_loss(single_loss, config.num_ensemble)

    # Adding weight decay, normalized by ensemble size and training set size.
    scale = config.l2_weight_decay / config.num_ensemble
    scale /= prior.num_train
    if config.adaptive_weight_scale:
      # Scale further with the prior's temperature and input dimension.
      scale *= np.sqrt(prior.temperature) * prior.input_dim
    loss_fn = losses.add_l2_weight_decay(loss_fn, scale=scale)
    return loss_fn

  agent_config = enn_agent.VanillaEnnConfig(
      enn_ctor=make_enn,
      loss_ctor=make_loss,
      num_batches=config.num_batches,
      seed=config.seed,
  )

  return enn_agent.VanillaEnnAgent(agent_config)
def vanilla_sweep() -> Sequence[VanillaEnsembleConfig]:
  """Sweep over ensemble sizes with otherwise-default settings."""
  return tuple(
      VanillaEnsembleConfig(num_ensemble)
      for num_ensemble in [1, 3, 10, 30, 100]
  )
def weight_sweep() -> Sequence[VanillaEnsembleConfig]:
  """Sweep weight-decay strength and scaling mode at a fixed ensemble of 30."""
  decay_values = [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100]
  # Outer loop over scaling mode, inner over decay, matching the sweep order
  # used elsewhere in this file.
  return tuple(
      VanillaEnsembleConfig(
          num_ensemble=30,
          l2_weight_decay=decay,
          adaptive_weight_scale=adaptive,
      )
      for adaptive in [True, False]
      for decay in decay_values
  )
def combined_sweep() -> Sequence[VanillaEnsembleConfig]:
  """Concatenate the ensemble-size sweep and the weight-decay sweep."""
  return (*vanilla_sweep(), *weight_sweep())
def paper_agent() -> factories_base.PaperAgent:
  """Bundle the default config, agent constructor and sweep for the paper."""
  agent = factories_base.PaperAgent(
      default=VanillaEnsembleConfig(),
      ctor=make_agent,
      sweep=combined_sweep,
  )
  return agent
| [
"noreply@google.com"
] | noreply@google.com |
c68d6ebbadb6d5ca9c872511c913b706c9693f5b | 6fb4419f219fcf2453becfd3fe2d31dca3401da6 | /get-influences.py | 6df1a95dda8a74b2d99570fca626c49ecff004b1 | [] | no_license | christopher-beckham/wiki-lang-influence | dccc04e3565a9df408353a247058a74a9c44f5bb | 9c2832cafc5d5c25f39aff739b0004af08a5234b | refs/heads/master | 2020-04-14T23:53:33.941193 | 2014-06-19T09:57:59 | 2014-06-19T09:57:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/python
from cz import cz
import sys
import re
import time
import urllib2
from sys import stdin
def get_langs(st):
    # Strip HTML markup, drop bracketed footnotes and flatten newlines
    # before splitting the comma-separated language list.
    text = "".join(cz.striphtml(st))
    text = re.sub('\\[.*?\\]', '', text).replace('\n', '')
    parts = text.split(',')
    # Every entry after the first begins with the space that followed the
    # comma; drop that single leading character.
    rest = [name[1:] for name in parts[1:]]
    return [parts[0]] + rest
def fe(arr):
print ",".join(arr)
for url in stdin.readlines():
try:
url = url.rstrip()
body = cz.geturl(url)
print url[ url.rfind('/')+1 :: ].replace("_(programming_language)","")
in_by = cz.getbetween2(body, '<th scope="row" style="text-align:left;">Influenced by</th>', '</tr>')
if len(in_by) > 0:
in_by = get_langs(in_by[0])
in_by = [ val.encode('ascii','ignore') for val in in_by ]
fe(in_by)
else:
print
in_to = cz.getbetween2(body, '<th scope="row" style="text-align:left;">Influenced</th>', '</tr>')
if len(in_to) > 0:
in_to = get_langs(in_to[0])
in_to = [ val.encode('ascii','ignore') for val in in_to ]
fe(in_to)
else:
print
except urllib2.HTTPError as e:
print "DONT_USE"
print
print
time.sleep(0.2) | [
"chrispy645@gmail.com"
] | chrispy645@gmail.com |
828ba88e8fa668802482e976be1d35158909f3e7 | 5d907c7eaf77bdd0395d93beea57d26b18ee0864 | /test/log.py | 64ddd432a0300365498c784262d2d3dedda51bc9 | [] | no_license | wangfeng7399/task | 3fc4e580d77837dc39985fad8ca2018fdbbec11a | f58b51334a61b0455b83d4ea046fa0eae48d25ea | refs/heads/master | 2021-01-09T20:52:59.792319 | 2017-02-27T02:43:47 | 2017-02-27T02:43:47 | 58,896,542 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | #!/bin/env python3
#coding:utf-8
import os
import time
import re
import MySQLdb
def mysqlinsert(sql):
    """Execute *sql* against the task database and commit.

    The connection and cursor are always closed, even when execute/commit
    raises (the original leaked both on failure).
    NOTE(review): credentials are hard-coded and callers build *sql* by
    string formatting rather than parameter binding -- worth revisiting.
    """
    conn=MySQLdb.Connect(host="10.10.3.56",user="root",passwd="task",db="task",charset="utf8")
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            conn.commit()
        finally:
            cur.close()
    finally:
        conn.close()
# Parse yesterday's MySQL slow-query log dumps (one file per database under
# today's history directory) and insert one row per captured statement.
t=time.strftime("%Y-%m-%d",time.localtime())
# Rows are dated to the previous day (the logs describe yesterday's traffic).
insettime=time.strftime("%Y-%m-%d",time.localtime(time.time()-86400))
for i in os.listdir("/var/www/html/history/{0}".format(t)):
    # File names look like <prefix>_<host>_<db>_...; keep the middle parts.
    l=i.split("_")[1:3]
    print(l)
    database=("-".join(l))
    try:
        with open("/var/www/html/history/{0}/{1}".format(t,i),"r+")as f:
            lines=f.read()
            # Each "# Query N" header starts one slow-query record.
            linelist=re.split("# Query \\d+",lines)
            for line in linelist:
                # Timing fields sit between "# Count" and "# Lock time".
                times=re.findall("# Count(.*?)# Lock time",line,re.S)
                if times:
                    times=times[0].split()
                    sqlall=re.findall("((select|insert|SELECT).*?\\\\G)",line,re.S)
                    if sqlall:
                        sql=list(set(sqlall))[0][0].strip()
                        insertsql='insert into web_slow(date,datab,maxtime,avgtime,mintime,insql,count) VALUES("{0}","{1}","{2}","{3}","{4}","{5}",{6})'.\
                        format(insettime,database,times[8],times[9],times[7],sql,times[1])
                        #print(insertsql)
                        mysqlinsert(insertsql)
    except Exception:
        # Some dumps are not UTF-8; retry the whole file as GBK and give up
        # silently if that fails too (best-effort import).
        with open("/var/www/html/history/{0}/{1}".format(t,i),"r+",encoding="gbk")as f:
            try:
                lines=f.read()
                linelist=re.split("# Query \\d+",lines)
                for line in linelist:
                    times=re.findall("# Count(.*?)# Lock time",line,re.S)
                    if times:
                        times=times[0].split()
                        sqlall=re.findall("((select|insert|SELECT).*?\\\\G)",line,re.S)
                        if sqlall:
                            sql=list(set(sqlall))[0][0].strip()
                            insertsql='insert into web_slow(date,datab,maxtime,avgtime,mintime,insql,count) VALUES("{0}","{1}","{2}","{3}","{4}","{5}",{6})'.\
                                format(insettime,database,times[8],times[9],times[7],sql,times[1])
                            #print(insertsql)
                            mysqlinsert(insertsql)
            except Exception:
                pass
| [
"wangfeng17399@163.com"
] | wangfeng17399@163.com |
a1fd5d1ba5523c8ee5579338e6ee4707b5c82688 | a89dfda3732eb73863b3e2fb1ebb46f1cb40973a | /txweb/lib/str_request.py | 0f90d8107e18a716cf7df54ddaea62f846c72d1f | [
"MIT"
] | permissive | devdave/txWeb | 543ccb7be0671a5e83959bb7cfc8e7804f04a74a | e447fbefd16134cb2f83323c04c20c41638d7da3 | refs/heads/master | 2022-12-15T18:11:50.880675 | 2021-03-24T18:48:16 | 2021-03-24T18:48:16 | 2,116,693 | 1 | 0 | MIT | 2022-12-08T04:28:41 | 2011-07-28T03:55:43 | Python | UTF-8 | Python | false | false | 12,567 | py | """
STATUS PENDING
Redo web Request to act as a str<->bytes proxy between our
application and twisted library.
Since Py3, all strings are unicode which is problematic for twisted as it
only works with bytes (and to some extent ascii). Instead of rewriting the entire library
and bedazzling it with flaky string encode/decode logic, the twisted maintainers
enforced bytes (or gtfo) only.
In this case, I am making a proxy request to catch str and convert to bytes before it moves upward
and into the twisted library. Unfortunately this is a doozy of a sub-project as its not just Request but also
headers logic.
"""
from __future__ import annotations
# import cgi
import json
from urllib.parse import parse_qs
import typing as T
from twisted.web.server import Request, NOT_DONE_YET
# from twisted.web.server import supportedMethods
from twisted.web.http import FOUND
from twisted.web import resource
from twisted.python.compat import intToBytes
from werkzeug.formparser import FormDataParser
from werkzeug.datastructures import MultiDict
from werkzeug.datastructures import FileStorage
from ..log import getLogger
from ..http_codes import HTTP500
log = getLogger(__name__)
class StrRequest(Request):
    """
    Request is actually a merger of three different topics.
    1. StrRequest contains all of the request data: headers & request body.
    2. StrRequest holds the connection API.
    3. StrRequest holds the response headers, http code, and response body until finalization.
    """
    NOT_DONE_YET: T.Union[int, bool] = NOT_DONE_YET
    def __init__(self, *args, **kwargs):
        Request.__init__(self, *args, **kwargs)
        # self.args = {} is already defined in Request's init
        # Parsed form fields and uploads, populated by _processFormData().
        self.form = {}  # type: T.Dict[str, str]
        self.files = {}  # type: T.Dict[str, FileStorage]
        # Optional hooks installed via add_before_render/add_after_render.
        self._call_before_render = None
        self._call_after_render = None
    def getCookie(self, cookie_name: T.Union[str, bytes]) -> T.Union[str, bytes]:
        """
        Wrapper around Request's getCookie to convert to and from byte strings
        to unicode/str's
        Parameters
        ----------
        cookie_name: str
        Returns
        -------
        If cookie_name argument is bytes, returns a byte string else returns str/unicode string
        """
        expect_bytes = isinstance(cookie_name, bytes)
        if expect_bytes:
            return Request.getCookie(self, cookie_name)
        else:
            byte_name = cookie_name.encode("ascii")
            retval = Request.getCookie(self, byte_name)
            if retval is not None:
                return retval.decode("utf-8")
            else:
                return None
    def add_before_render(self, func):
        """
        Utility intended solely to make testing easier
        """
        self._call_before_render = func
        return func
    def add_after_render(self, func):
        """
        Utility intended solely to make testing easier
        """
        self._call_after_render = func
        return func
    def write(self, data: T.Union[bytes, str]):
        """
        Wrapper to prevent unicode/str's from going to Request's write method
        """
        if isinstance(data, str):
            data = data.encode("utf-8")
        elif isinstance(data, bytes) is False:
            raise ValueError(f"Attempting to write to transport {type(data)}-{data!r}"
                             " must be bytes or Str")
        return Request.write(self, data)
    def writeTotal(self, response_body: T.Union[bytes, str], code: T.Union[int, str, bytes] = None,
                   message: T.Union[bytes, str] = None) -> T.NoReturn:
        """
        Utility to write and then close the connection in one go.
        Especially useful for error handling events.
        Parameters
        ----------
        response_body:
            Content intended to be sent to the client browser
        code:
            Optional HTTP Code to use
        message:
            Optional HTTP response message to use
        """
        content_length = intToBytes(len(response_body))
        self.setHeader("Content-Length", content_length)
        if code is not None:
            self.setResponseCode(code, message=message)
        self.write(response_body)
        self.ensureFinished()
    def writeJSON(self, data: T.Dict):
        """
        Utility to take a dictionary and convert it to a JSON string
        """
        payload = json.dumps(data)
        content_length = intToBytes(len(payload))
        self.setHeader("Content-Type", "application/json")
        self.setHeader("Content-Length", content_length)
        return self.write(payload)
    def setHeader(self, name: T.Union[str, bytes], value: T.Union[str, bytes]):
        """
        A quick wrapper to convert unicode inputs to utf-8 bytes
        Set's a header for the RESPONSE
        Parameters
        ----------
        name:
            A valid HTTP header
        value
            Syntactically correct value for the provided header name
        """
        if isinstance(name, str):
            name = name.encode("utf-8")
        if isinstance(value, str):
            value = value.encode("utf-8")
        return Request.setHeader(self, name, value)
    def setResponseCode(self,
                        code: int = 500,
                        message: T.Optional[T.Union[str, bytes]] = b"Failure processing request") -> T.NoReturn:
        """
        Str to unicode wrapper around twisted.web's Request class.
        Parameters
        ----------
        code
        message
        Returns
        -------
        """
        if message and not isinstance(message, bytes):
            message = message.encode("utf-8")
        Request.setResponseCode(self, code, message)
    def ensureFinished(self) -> None:
        """
        Ensure's the connection has been flushed and closed without throwing an error.
        """
        if self.finished not in [1, True]:
            self.finish()
    def requestReceived(self, command, path, version):
        """
        Looks for POST'd arguments in form format (eg multipart).
        Allows for file uploads and adds them to .args
        """
        self.content.seek(0, 0)
        self.args = {}
        self.form = {}
        self.method, self.uri = command, path
        self.clientproto = version
        # Split the query string off the URI, if there is one.
        x = self.uri.split(b"?", 1)
        if len(x) == 1:
            self.path = self.uri
        else:
            self.path, arg_string = x
            self.args = parse_qs(arg_string.decode())
        ctype = self.requestHeaders.getRawHeaders(b'content-type')
        clength = self.requestHeaders.getRawHeaders(b'content-length')
        if ctype is not None:
            ctype = ctype[0]
        if clength is not None:
            clength = clength[0]
        if self.method == b"POST" and ctype and clength:
            self._processFormData(ctype, clength)
        self.content.seek(0, 0)
        # Args are going to userland, switch bytes back to str
        query_args = self.args.copy()
        def query_iter(arguments):
            for key, values in arguments.items():
                key = key.decode("utf-8") if isinstance(key, bytes) else key
                for val in values:
                    val = val.decode("utf-8") if isinstance(val, bytes) else val
                    yield key, val
        self.args = MultiDict(list(query_iter(query_args)))
        self.process()
    @property
    def methodIsPost(self) -> bool:
        """
        Utility method
        Returns
        -------
        bool - Is the current request a POST request
        """
        return self.method == b"POST"
    @property
    def methodIsGet(self) -> bool:
        """
        Utility method
        Returns
        -------
        True if the current request is a HTTP GET request.
        """
        return self.method == b"GET"
    def render(self, resrc: resource.Resource) -> None:
        """
        Ask a resource to render itself unless a prefilter returns a string/bytes
        body which will be rendered instead.
        Parameters
        ----------
        resrc: Resource
            The resource to be rendered.
        Returns
        -------
        None, output is written directly to the underlying HTTP channel.
        """
        body = None
        if self._call_before_render is not None:
            body = self._call_before_render(self)
        if body is None:
            body = resrc.render(self)
        if self._call_after_render is not None:
            self._call_after_render(self, body)
        # TODO deal with HEAD requests or leave it to the Application developer to deal with?
        if body is NOT_DONE_YET:
            return
        if not isinstance(body, bytes):
            log.error(
                f"<{type(resrc)}{resrc!r}>"
                f"- uri={self.uri} returned {type(body)}:{len(body)} but MUST return a byte string")
            raise HTTP500()
        if self.method == b"HEAD":
            if len(body) > 0:
                # This is a Bad Thing (RFC 2616, 9.4)
                self._log.info(
                    "Warning: HEAD request {slf} for resource {resrc} is"
                    " returning a message body. I think I'll eat it.",
                    slf=self,
                    resrc=resrc
                )
            self.setHeader(b'content-length',
                           intToBytes(len(body)))
            self.write(b'')
        else:
            self.setHeader(b'content-length',
                           intToBytes(len(body)))
            self.write(body)
        self.finish()
    def _processFormData(self, content_type, content_length):
        """
        Processes POST requests and puts POST'd arguments into args.
        Thank you Cristina - http://www.cristinagreen.com/uploading-files-using-twisted-web.html
        TODO this can be problematic if a large binary file is being uploaded
        TODO verify Twisted HTTP channel/transport blows up if file upload size is "too big"
        """
        options = {}
        if isinstance(content_type, bytes):
            content_type = content_type.decode("utf-8")  # type: str
        if ";" in content_type:
            # TODO Possible need to replace some of the header processing logic as boundary part of content-type
            # leaks through. eg "Content-type": "some/mime_type;boundary=----BLAH"
            content_type, boundary = content_type.split(";", 1)
            if "=" in boundary:
                _, boundary = boundary.split("=", 1)
                options['boundary'] = boundary
        content_length = int(content_length)
        self.content.seek(0, 0)
        parser = FormDataParser()
        _, self.form, self.files = parser.parse(self.content, content_type, content_length, options=options)
        self.content.seek(0, 0)
    def processingFailed(self, reason):
        """
        Start of the error handling chain that leads from here all the way up to Application.processingFailed
        :param reason:
        :return:
        """
        self.site.processingFailed(self, reason)
    @property
    def json(self) -> T.Any:
        """
        Is this a JSON posted request?
        Returns
        -------
        Ideally returns a dict object as I cannot think of what else a sane client would send in JSON format.
        """
        # NOTE(review): this consumes the body stream without rewinding first;
        # a second access would read nothing -- confirm intended single use.
        if self.getHeader("Content-Type") in ["application/json", "text/json"]:
            return json.loads(self.content.read())
        else:
            return None
    def get_json(self) -> T.Any:
        """
        Intended to mimic Flask api
        Returns
        -------
        dict - a json decoded object
        """
        return self.json
    def redirect(self, url: T.Union[str, bytes], code=FOUND) -> T.NoReturn:
        """
        Utility function that does a redirect.
        Set the response code to L{FOUND} and the I{Location} header to the
        given URL.
        The request should have C{finish()} called after this.
        Parameters
        ----------
        url: bytes
            What to set the LOCATION http response header to
        code: int
            What to set the HTTP response code to (eg 3xx)
        """
        self.setResponseCode(code)
        self.setHeader(b"location", url)
        #self.ensureFinished()
| [
"devdave@ominian.net"
] | devdave@ominian.net |
d0eecd2114502f42f828a8d2ecbec82f6dafb9b5 | 3e00e19a1b8ab53a525ce358eeed276899a044e0 | /craigslistscraper/scraper.py | c6a73260fb89ab47c58bda51170ab2d85fa5d15c | [
"MIT"
] | permissive | thirtyHP/CraigslistScraper | c910619d2b8a6ccaf19ae68c55a28453be18a3e7 | ab2c3d6454d1f36332947ac59723ddb120b258d6 | refs/heads/master | 2023-08-23T17:36:25.706407 | 2021-09-21T18:31:41 | 2021-09-21T18:31:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | from craigslistscraper import domain
import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
class CraigslistSearches:
    """
    Pulls the relevant pieces of every ad on a Craigslist search-results
    page (titles, prices, links, details) so they can be displayed and
    exported to CSV.
    """

    def __init__(self, domain_get):
        # Fetch the results page once; every accessor parses this soup.
        self.page = requests.get(domain_get)
        self.soup = BeautifulSoup(self.page.content, 'html.parser')

    def posting_title(self):
        """Return the posting title of each ad as a list of strings."""
        posting_title_raw = self.soup.find_all(class_='result-title hdrlnk')
        return [item.get_text() for item in posting_title_raw]

    def price(self):
        """Return each ad's price as a "$<price>" string (dollar sign included)."""
        prices_raw = self.soup.find_all(class_='result-meta')
        return [item.find(class_='result-price').get_text() for item in prices_raw]

    def ad_href(self):
        """Return the link (href) of each ad as a list of strings."""
        raw = self.soup.find_all(class_='result-row')
        ad_link_raw = [item.find('a') for item in raw]
        return [items.get('href') for items in ad_link_raw]

    def posting_details(self):
        """Return (posting_details, descriptions) gathered from each ad's page."""
        posting_details = []
        description = []
        for url in self.ad_href():
            ad_page = requests.get(url)
            soup = BeautifulSoup(ad_page.content, 'html.parser')
            ad_info = soup.select('span')
            data = []
            unorganized_data_info = []
            for info in ad_info:  # only keep elements that don't have a 'class' or 'id' attribute
                if not (info.has_attr('class') or info.has_attr('id')):
                    data.append(info)
            for d in data:
                unorganized_data_info.append(d.text.split(': '))
            description_raw = soup.find_all(id='postingbody')
            for item in description_raw:
                unfiltered = item.get_text(strip=True)
                # BUGFIX: str.strip(chars) treats its argument as a character
                # set and can eat real leading/trailing letters; remove the
                # boilerplate phrase itself instead.
                description.append(unfiltered.replace('QR Code Link to This Post', ''))
            posting_details.append(unorganized_data_info)
        return posting_details, description

    def display(self):
        """
        Display the scraped data in the terminal and append it to
        'data/search_info.csv'.
        """
        data = pd.DataFrame(
            {
                # BUGFIX: the class has no name() method; titles come from
                # posting_title().
                'Name:': self.posting_title(),
                'Price:': self.price(),
                'HREF:': self.ad_href()
            })
        # Parses data into 'search_info.csv' (appending to earlier searches).
        data.to_csv('data/search_info.csv', index=False, mode='a')
        if data.empty:
            print('No Results')
        else:
            print(data)
| [
"ryanirl@icloud.com"
] | ryanirl@icloud.com |
d7d700537ef471799c3f698252d1209eb49e897c | dc92202c0033ee7f330b558743913a9c0d06ae5c | /certificate_formatter.py | d6c045186a6c44bb68bbe91e9e6cacc5e167bc1f | [] | no_license | DasHaSneg/Checkblockchaindiplomas | ca091e881313ace7833d2f05c2fe263be1592b78 | 4501bcb5c0baa8342f6add1ff08fa19940fd3f6b | refs/heads/master | 2022-12-10T01:43:54.680061 | 2021-10-05T06:40:42 | 2021-10-05T06:40:42 | 186,687,853 | 0 | 0 | null | 2022-12-08T05:14:08 | 2019-05-14T19:35:22 | JavaScript | UTF-8 | Python | false | false | 1,575 | py | import helpers
from cert_core import BlockchainType
def certificate_to_award(displayable_certificate):
    """Flatten a displayable certificate model into the award dict the viewer expects."""
    cert = displayable_certificate
    tx_url = helpers.get_tx_lookup_chain(cert.chain, cert.txid)
    award = {
        'logoImg': cert.issuer.image,
        'name': cert.recipient_name,
        'title': cert.title,
        'organization': cert.issuer.name,
        'text': cert.description,
        'issuerID': cert.issuer.id,
        'chain': get_displayable_blockchain_type(cert.chain.blockchain_type),
        'transactionID': cert.txid,
        'transactionIDURL': tx_url,
        'issuedOn': cert.issued_on.strftime('%Y-%m-%d')
    }
    # Optional fields: only present when the certificate carries them.
    if cert.signature_image:
        award['signatureImg'] = cert.signature_image[0].image
    if cert.subtitle:
        award['subtitle'] = cert.subtitle
    return award
def get_formatted_award_and_verification_info(cert_store, certificate_uid):
    """Look up a certificate and return its (award dict, verification info) pair."""
    model = cert_store.get_certificate(certificate_uid)
    verification_info = {'uid': str(certificate_uid)}
    return certificate_to_award(model), verification_info
def get_displayable_blockchain_type(chain):
    """Map a BlockchainType value to its display name; None when unknown."""
    if chain == BlockchainType.bitcoin:
        return 'Bitcoin'
    if chain == BlockchainType.mock:
        return 'Mock'
    return None
| [
"dashasneg@mail.ru"
] | dashasneg@mail.ru |
25f478d4079115b4defb2e4b4b853c3a75b52a9e | d4bb84c8a805f297f80e91ea0149244034743739 | /PyGame/Tanks/Tanks.py | 1d71c1b558c20e4b92392d40bbf74ffdb6f61a71 | [] | no_license | AcePjoez/Python | 53da5893ac7fa22648caef647ce24c697f4db11b | 4217ba59b24f9f2c74c472e00e58b2cc59d3ddc1 | refs/heads/master | 2021-04-15T18:26:20.703159 | 2018-03-24T18:16:58 | 2018-03-24T18:16:58 | 126,230,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,324 | py | import pygame
import time
import random
# Initialize all pygame modules before any display/font use.
pygame.init()
# Window dimensions in pixels.
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Tanks')
# icon = pygame.image.load("apple.png")
# pygame.display.set_icon(icon)
# RGB color palette shared by all drawing code.
white = (255, 255, 255)
black = (0, 0, 0)
red = (200, 0, 0)
light_red = (255, 0, 0)
yellow = (200, 200, 0)
light_yellow = (255, 255, 0)
green = (34, 177, 76)
light_green = (0, 255, 0)
clock = pygame.time.Clock()
# Tank geometry (pixels).
tankWidth = 40
tankHeight = 20
turretWidth = 5
wheelWidth = 5
# Height of the ground strip at the bottom of the screen.
ground_height = 35
# Fonts for the three text sizes used by text_objects().
smallfont = pygame.font.SysFont("comicsansms", 25)
medfont = pygame.font.SysFont("comicsansms", 50)
largefont = pygame.font.SysFont("comicsansms", 85)
# img = pygame.image.load('snakehead.png')
# appleimg = pygame.image.load('apple.png')
def score(score):
    """Render the current score in the top-left corner of the display."""
    label = smallfont.render("Score: " + str(score), True, black)
    gameDisplay.blit(label, [0, 0])
def text_objects(text, color, size="small"):
    """Render *text* and return (surface, bounding rect).

    Parameters
    ----------
    text : str -- the message to render.
    color : tuple -- RGB color of the text.
    size : str -- "small", "medium" or "large".

    An unrecognized size falls back to the small font (the original left
    the surface unbound and raised UnboundLocalError).
    """
    fonts = {"small": smallfont, "medium": medfont, "large": largefont}
    textSurface = fonts.get(size, smallfont).render(text, True, color)
    return textSurface, textSurface.get_rect()
def text_to_button(msg, color, buttonx, buttony, buttonwidth, buttonheight, size="small"):
    """Draw *msg* centered inside the given button rectangle."""
    surface, rect = text_objects(msg, color, size)
    # Anchor the text at the button's geometric center.
    center_x = buttonx + (buttonwidth / 2)
    center_y = buttony + (buttonheight / 2)
    rect.center = (center_x, center_y)
    gameDisplay.blit(surface, rect)
def message_to_screen(msg, color, y_displace=0, size="small"):
    """Draw *msg* centered horizontally, offset vertically by *y_displace*."""
    surface, rect = text_objects(msg, color, size)
    # Screen center, shifted down (positive) or up (negative).
    rect.center = (int(display_width / 2), int(display_height / 2) + y_displace)
    gameDisplay.blit(surface, rect)
def tank(x, y, turPos):
    """Draw the player's tank at (x, y) with the barrel facing left.

    Parameters
    ----------
    x, y : numeric -- center of the tank body (turret pivot); coerced to int.
    turPos : int -- index 0-8 selecting the turret elevation (higher is steeper).

    Returns the (x, y) of the turret muzzle so callers know where shells start.
    """
    x = int(x)
    y = int(y)
    # Muzzle end-point for each of the nine turret elevations.
    possibleTurrets = [(x - 27, y - 2),
                       (x - 26, y - 5),
                       (x - 25, y - 8),
                       (x - 23, y - 12),
                       (x - 20, y - 14),
                       (x - 18, y - 15),
                       (x - 15, y - 17),
                       (x - 13, y - 19),
                       (x - 11, y - 21)
                       ]

    # Body: turret base circle, hull rectangle, and the barrel line.
    pygame.draw.circle(gameDisplay, black, (x, y), int(tankHeight / 2))
    pygame.draw.rect(gameDisplay, black, (x - tankHeight, y, tankWidth, tankHeight))
    pygame.draw.line(gameDisplay, black, (x, y), possibleTurrets[turPos], turretWidth)

    # Seven evenly spaced wheels along the bottom of the hull.
    # (The original drew the two leftmost wheels twice; once is enough.)
    for offset in range(-15, 20, 5):
        pygame.draw.circle(gameDisplay, black, (x + offset, y + 20), wheelWidth)

    return possibleTurrets[turPos]
def enemy_tank(x, y, turPos):
    """Draw the enemy tank at (x, y) with the barrel facing right.

    Mirror image of tank(): same geometry but the turret points toward the
    player on the right side of the screen.  Returns the muzzle (x, y).
    """
    x = int(x)
    y = int(y)
    # Muzzle end-point for each of the nine turret elevations (rightward).
    possibleTurrets = [(x + 27, y - 2),
                       (x + 26, y - 5),
                       (x + 25, y - 8),
                       (x + 23, y - 12),
                       (x + 20, y - 14),
                       (x + 18, y - 15),
                       (x + 15, y - 17),
                       (x + 13, y - 19),
                       (x + 11, y - 21)
                       ]

    # Body: turret base circle, hull rectangle, and the barrel line.
    pygame.draw.circle(gameDisplay, black, (x, y), int(tankHeight / 2))
    pygame.draw.rect(gameDisplay, black, (x - tankHeight, y, tankWidth, tankHeight))
    pygame.draw.line(gameDisplay, black, (x, y), possibleTurrets[turPos], turretWidth)

    # Seven evenly spaced wheels along the bottom of the hull.
    # (The original drew the two leftmost wheels twice; once is enough.)
    for offset in range(-15, 20, 5):
        pygame.draw.circle(gameDisplay, black, (x + offset, y + 20), wheelWidth)

    return possibleTurrets[turPos]
def game_controls():
    """Show the controls screen; navigation happens through the buttons."""
    while True:
        # Only the window-close event is handled here; the buttons below
        # call into other screens (play/main/quit).
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

        gameDisplay.fill(white)
        message_to_screen("Controls", green, -100, size="large")
        message_to_screen("Fire: Spacebar", black, -30)
        message_to_screen("Move Turret: Up and Down arrows", black, 10)
        message_to_screen("Move Tank: Left and Right arrows", black, 50)
        message_to_screen("Pause: P", black, 90)

        button("play", 150, 500, 100, 50, green, light_green, action="play")
        button("Main", 350, 500, 100, 50, yellow, light_yellow, action="main")
        button("quit", 550, 500, 100, 50, red, light_red, action="quit")

        pygame.display.update()
        clock.tick(15)
def button(text, x, y, width, height, inactive_color, active_color, action=None):
    """Draw a clickable button; run *action* when clicked while hovered."""
    mouse_x, mouse_y = pygame.mouse.get_pos()
    pressed = pygame.mouse.get_pressed()

    hovered = x < mouse_x < x + width and y < mouse_y < y + height
    fill = active_color if hovered else inactive_color
    pygame.draw.rect(gameDisplay, fill, (x, y, width, height))

    if hovered and pressed[0] == 1 and action != None:
        # Dispatch to the screen the button names.
        if action == "quit":
            pygame.quit()
            quit()
        if action == "controls":
            game_controls()
        if action == "play":
            gameLoop()
        if action == "main":
            game_intro()

    text_to_button(text, black, x, y, width, height)
def pause():
    """Freeze the game until the player presses C (continue) or Q (quit)."""
    message_to_screen("Paused", black, -100, size="large")
    message_to_screen("Press C to continue playing or Q to quit", black, 25)
    pygame.display.update()

    paused = True
    while paused:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    paused = False
                elif event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        # Low tick rate: nothing needs to move while paused.
        clock.tick(5)
def barrier(xlocation, randomHeight, barrier_width):
    """Draw the vertical obstacle rising from the ground between the tanks."""
    top = display_height - randomHeight
    pygame.draw.rect(gameDisplay, black, [xlocation, top, barrier_width, randomHeight])
def explosion(x, y, size=50):
    """Draw a one-frame particle explosion centered on (x, y).

    Parameters
    ----------
    x, y : int -- impact point (center of the blast).
    size : int -- maximum scatter radius of the particles.

    The original wrapped everything in a while-loop that ran exactly once
    and kept an unused startPoint variable; both are removed.
    """
    # Drain the event queue so the window stays responsive to close requests.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()

    colorChoices = [red, light_red, yellow, light_yellow]
    # One particle per radius step; later particles scatter wider, giving a
    # dense core with sparser edges.
    for magnitude in range(1, size):
        exploding_bit_x = x + random.randrange(-1 * magnitude, magnitude)
        exploding_bit_y = y + random.randrange(-1 * magnitude, magnitude)
        pygame.draw.circle(gameDisplay, random.choice(colorChoices),
                           (exploding_bit_x, exploding_bit_y),
                           random.randrange(1, 5))

    pygame.display.update()
    clock.tick(100)
def fireShell(xy, tankx, tanky, turPos, gun_power, xlocation, barrier_width, randomHeight, enemyTankX, enemyTankY):
    """Animate the player's shell (flying left) until it lands or hits the barrier.

    Parameters
    ----------
    xy : tuple -- muzzle position where the shell starts.
    tankx, tanky : unused; kept for signature compatibility with callers.
    turPos : int -- turret elevation index (0-8); sets horizontal speed/arc.
    gun_power : int -- selected firing power (1-100); flattens the arc.
    xlocation, barrier_width, randomHeight -- geometry of the center barrier.
    enemyTankX, enemyTankY -- target tank position (y currently unused).

    Returns
    -------
    int -- 25 if the shell lands within 15px of the enemy tank, else 0.

    The shell now always explodes and stops on any ground impact (damage is
    only awarded on a near hit), guaranteeing the flight loop terminates.
    """
    damage = 0
    startingShell = list(xy)
    print("FIRE!", xy)
    fire = True
    while fire:
        # Keep the window responsive while the shell is in flight.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

        pygame.draw.circle(gameDisplay, red, (startingShell[0], startingShell[1]), 5)

        # Horizontal speed is fixed by the turret elevation; vertical motion
        # follows a power-scaled parabola (y = x**2 shape) minus initial lift.
        startingShell[0] -= (12 - turPos) * 2
        startingShell[1] += int(
            (((startingShell[0] - xy[0]) * 0.015 / (gun_power / 50)) ** 2) - (turPos + turPos / (12 - turPos)))

        # Ground impact: always explode and stop; award damage on a near hit.
        if startingShell[1] > display_height - ground_height:
            print("Last shell:", startingShell[0], startingShell[1])
            # NOTE(review): precedence looks odd here -- possibly meant
            # startingShell[0] * (display_height - ground_height); kept as-is
            # to preserve existing hit balance.
            hit_x = int((startingShell[0] * display_height - ground_height) / startingShell[1])
            hit_y = int(display_height - ground_height)
            print("Impact:", hit_x, hit_y)
            if enemyTankX + 15 > hit_x > enemyTankX - 15:
                print("HIT TARGET!")
                damage = 25
            explosion(hit_x, hit_y)
            fire = False

        # Barrier impact: explode against the wall, no damage.
        check_x_1 = startingShell[0] <= xlocation + barrier_width
        check_x_2 = startingShell[0] >= xlocation
        check_y_1 = startingShell[1] <= display_height
        check_y_2 = startingShell[1] >= display_height - randomHeight
        if check_x_1 and check_x_2 and check_y_1 and check_y_2:
            print("Last shell:", startingShell[0], startingShell[1])
            hit_x = int(startingShell[0])
            hit_y = int(startingShell[1])
            print("Impact:", hit_x, hit_y)
            explosion(hit_x, hit_y)
            fire = False

        pygame.display.update()
        clock.tick(60)
    return damage
def e_fireShell(xy, tankx, tanky, turPos, gun_power, xlocation, barrier_width, randomHeight, ptankx, ptanky):
    """AI firing routine: search for a power that hits the player, then fire.

    Phase 1 silently simulates shots at increasing power (2..100) until one
    lands within 15px of the player's tank, or the range is exhausted.
    Phase 2 fires a visible shell with that power randomized by +/-10%, so
    the AI is good but not perfect.

    Parameters mirror fireShell(); ptankx/ptanky locate the player's tank
    (tankx, tanky, gun_power and ptanky are unused).

    Returns
    -------
    int -- 25 if the visible shell lands on the player, else 0.

    The simulation and the visible flight now always stop on any ground
    impact, guaranteeing both loops terminate.
    """
    damage = 0

    # ---- Phase 1: find a workable power by silent simulation. ----
    currentPower = 1
    power_found = False
    while not power_found:
        currentPower += 1
        if currentPower > 100:
            # Give up searching and just fire at maximum power.
            power_found = True
        fire = True
        startingShell = list(xy)
        while fire:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            # Same trajectory math as the visible shot, but nothing is drawn.
            startingShell[0] += (12 - turPos) * 2
            startingShell[1] += int(
                (((startingShell[0] - xy[0]) * 0.015 / (currentPower / 50)) ** 2) - (turPos + turPos / (12 - turPos)))
            if startingShell[1] > display_height - ground_height:
                hit_x = int((startingShell[0] * display_height - ground_height) / startingShell[1])
                if ptankx + 15 > hit_x > ptankx - 15:
                    print("target acquired!")
                    power_found = True
                fire = False
            check_x_1 = startingShell[0] <= xlocation + barrier_width
            check_x_2 = startingShell[0] >= xlocation
            check_y_1 = startingShell[1] <= display_height
            check_y_2 = startingShell[1] >= display_height - randomHeight
            if check_x_1 and check_x_2 and check_y_1 and check_y_2:
                # Simulated shell hit the barrier; this power won't work.
                fire = False

    # ---- Phase 2: fire the visible shell with the power found above. ----
    fire = True
    startingShell = list(xy)
    print("FIRE!", xy)
    while fire:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        pygame.draw.circle(gameDisplay, red, (startingShell[0], startingShell[1]), 5)
        startingShell[0] += (12 - turPos) * 2
        # Randomize the power each step so the AI can still miss.
        gun_power = random.randrange(int(currentPower * 0.90), int(currentPower * 1.10))
        startingShell[1] += int(
            (((startingShell[0] - xy[0]) * 0.015 / (gun_power / 50)) ** 2) - (turPos + turPos / (12 - turPos)))
        # Ground impact: always explode and stop; damage only on a near hit.
        if startingShell[1] > display_height - ground_height:
            print("last shell:", startingShell[0], startingShell[1])
            hit_x = int((startingShell[0] * display_height - ground_height) / startingShell[1])
            hit_y = int(display_height - ground_height)
            print("Impact:", hit_x, hit_y)
            if ptankx + 15 > hit_x > ptankx - 15:
                print("HIT TARGET!")
                damage = 25
            explosion(hit_x, hit_y)
            fire = False
        # Barrier impact: explode against the wall, no damage.
        check_x_1 = startingShell[0] <= xlocation + barrier_width
        check_x_2 = startingShell[0] >= xlocation
        check_y_1 = startingShell[1] <= display_height
        check_y_2 = startingShell[1] >= display_height - randomHeight
        if check_x_1 and check_x_2 and check_y_1 and check_y_2:
            print("Last shell:", startingShell[0], startingShell[1])
            hit_x = int(startingShell[0])
            hit_y = int(startingShell[1])
            print("Impact:", hit_x, hit_y)
            explosion(hit_x, hit_y)
            fire = False
        pygame.display.update()
        clock.tick(60)
    return damage
def power(level):
    """Display the currently selected firing power as a percentage."""
    label = smallfont.render("Power: " + str(level) + "%", True, black)
    gameDisplay.blit(label, [display_width / 2, 0])
def game_intro():
    """Show the title screen until the player starts (C/play) or quits (Q)."""
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    intro = False
                elif event.key == pygame.K_q:
                    pygame.quit()
                    quit()

        gameDisplay.fill(white)
        message_to_screen("Welcome to Tanks!", green, -100, size="large")
        message_to_screen("The objective is to shoot and destroy", black, -30)
        message_to_screen("the enemy tank before they destroy you.", black, 10)
        message_to_screen("The more enemies you destroy, the harder they get.", black, 50)

        button("play", 150, 500, 100, 50, green, light_green, action="play")
        button("controls", 350, 500, 100, 50, yellow, light_yellow, action="controls")
        button("quit", 550, 500, 100, 50, red, light_red, action="quit")

        pygame.display.update()
        clock.tick(15)
def game_over():
    """Show the defeat screen; the buttons handle replay/controls/quit."""
    showing = True
    while showing:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

        gameDisplay.fill(white)
        message_to_screen("Game Over", green, -100, size="large")
        message_to_screen("You died.", black, -30)

        button("play Again", 150, 500, 150, 50, green, light_green, action="play")
        button("controls", 350, 500, 100, 50, yellow, light_yellow, action="controls")
        button("quit", 550, 500, 100, 50, red, light_red, action="quit")

        pygame.display.update()
        clock.tick(15)
def you_win():
    """Show the victory screen; only the buttons or closing the window leave it."""
    # The original loop flag was never cleared, so this is an endless loop
    # until a button action or the window-close event takes over.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        gameDisplay.fill(white)
        message_to_screen("You won!", green, -100, size="large")
        message_to_screen("Congratulations!", black, -30)
        button("play Again", 150, 500, 150, 50, green, light_green, action="play")
        button("controls", 350, 500, 100, 50, yellow, light_yellow, action="controls")
        button("quit", 550, 500, 100, 50, red, light_red, action="quit")
        pygame.display.update()
        clock.tick(15)
def health_bars(player_health, enemy_health):
    """Draw both tanks' health bars, colour-coded by remaining health."""
    def bar_color(hp):
        # green above 75, yellow above 50, red otherwise
        if hp > 75:
            return green
        if hp > 50:
            return yellow
        return red

    # Bar width doubles as the numeric health value (max 100 px).
    pygame.draw.rect(gameDisplay, bar_color(player_health), (680, 25, player_health, 25))
    pygame.draw.rect(gameDisplay, bar_color(enemy_health), (20, 25, enemy_health, 25))
def gameLoop():
    """Main gameplay loop: player input, firing exchanges, drawing and HUD.

    Runs until the window is closed (or quit chosen on the game-over prompt).
    Each frame: process events, apply tank/turret/power changes, redraw the
    scene, then check win/lose conditions.
    """
    gameExit = False
    gameOver = False
    FPS = 15
    player_health = 100
    enemy_health = 100
    barrier_width = 50
    # Player starts at the right edge, enemy at the left; both on the ground line.
    mainTankX = display_width * 0.9
    mainTankY = display_height * 0.9
    tankMove = 0          # horizontal velocity applied each frame (px/frame)
    currentTurPos = 0     # turret position index, clamped to 0..8 below
    changeTur = 0         # per-frame turret index delta while UP/DOWN held
    enemyTankX = display_width * 0.1
    enemyTankY = display_height * 0.9
    fire_power = 50       # shot power in percent, clamped to 1..100 below
    power_change = 0      # per-frame power delta while A/D held
    # Central barrier with a randomized horizontal offset and height.
    xlocation = (display_width / 2) + random.randint(-0.1 * display_width, 0.1 * display_width)
    randomHeight = random.randrange(display_height * 0.1, display_height * 0.6)
    while not gameExit:
        if gameOver == True:
            # Game-over prompt: C restarts (note: recursive gameLoop() call),
            # Q or closing the window exits.
            # gameDisplay.fill(white)
            message_to_screen("Game Over", red, -50, size="large")
            message_to_screen("Press C to play again or Q to exit", black, 50)
            pygame.display.update()
            while gameOver == True:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        gameExit = True
                        gameOver = False
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_c:
                            gameLoop()
                        elif event.key == pygame.K_q:
                            gameExit = True
                            gameOver = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    tankMove = -5
                elif event.key == pygame.K_RIGHT:
                    tankMove = 5
                elif event.key == pygame.K_UP:
                    changeTur = 1
                elif event.key == pygame.K_DOWN:
                    changeTur = -1
                elif event.key == pygame.K_p:
                    pause()
                elif event.key == pygame.K_SPACE:
                    # Player fires; fireShell returns the damage dealt.
                    # NOTE(review): `gun` is first assigned further down in the
                    # frame loop, so pressing SPACE before the first frame is
                    # drawn would raise NameError — confirm whether reachable.
                    damage = fireShell(gun, mainTankX, mainTankY, currentTurPos, fire_power, xlocation, barrier_width,
                                       randomHeight, enemyTankX, enemyTankY)
                    enemy_health -= damage
                    # Enemy reacts: random direction ('f'orward/'r'everse) and a
                    # random 0-9 steps, each step rendered as its own frame.
                    possibleMovement = ['f', 'r']
                    moveIndex = random.randrange(0,2)
                    for x in range(random.randrange(0,10)):
                        # Keep the enemy within its strip of the battlefield.
                        if display_width*0.3 > enemyTankX > display_width*0.03:
                            if possibleMovement[moveIndex] == 'f':
                                enemyTankX += 5
                            elif possibleMovement[moveIndex] == 'r':
                                enemyTankX -= 5
                        gameDisplay.fill(white)
                        health_bars(player_health, enemy_health)
                        gun = tank(mainTankX, mainTankY, currentTurPos)
                        enemy_gun = enemy_tank(enemyTankX, enemyTankY, 8)
                        fire_power += power_change
                        power(fire_power)
                        barrier(xlocation, randomHeight, barrier_width)
                        gameDisplay.fill(green,
                                         rect=[0, display_height - ground_height, display_width, ground_height])
                        pygame.display.update()
                        clock.tick(FPS)
                    # Enemy returns fire immediately after moving.
                    damage = e_fireShell(enemy_gun, enemyTankX, enemyTankY, 8, 50, xlocation, barrier_width,
                                         randomHeight, mainTankX, mainTankY)
                    player_health -= damage
                elif event.key == pygame.K_a:
                    power_change = -1
                elif event.key == pygame.K_d:
                    power_change = 1
            elif event.type == pygame.KEYUP:
                # Stop movement/turret/power adjustment when the key is released.
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    tankMove = 0
                if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                    changeTur = 0
                if event.key == pygame.K_a or event.key == pygame.K_d:
                    power_change = 0
        mainTankX += tankMove
        currentTurPos += changeTur
        # Clamp turret index to the valid 0..8 range.
        if currentTurPos > 8:
            currentTurPos = 8
        elif currentTurPos < 0:
            currentTurPos = 0
        # Push the player back if it would overlap the barrier.
        if mainTankX - (tankWidth / 2) < xlocation + barrier_width:
            mainTankX += 5
        gameDisplay.fill(white)
        health_bars(player_health, enemy_health)
        gun = tank(mainTankX, mainTankY, currentTurPos)
        enemy_gun = enemy_tank(enemyTankX, enemyTankY, 8)
        fire_power += power_change
        # Clamp power to 1..100 percent.
        if fire_power > 100:
            fire_power = 100
        elif fire_power < 1:
            fire_power = 1
        power(fire_power)
        barrier(xlocation, randomHeight, barrier_width)
        gameDisplay.fill(green, rect=[0, display_height - ground_height, display_width, ground_height])
        pygame.display.update()
        if player_health < 1:
            game_over()
        elif enemy_health < 1:
            you_win()
        clock.tick(FPS)
    pygame.quit()
    quit()
game_intro()
gameLoop() | [
"noreply@github.com"
] | noreply@github.com |
2383868cb40c9ad8cc904de78599d183924388d9 | 376888dc5005de12a4df2de7ee6a9407d6691fe5 | /jobs/migrations/0001_initial.py | cd17dfa206c4c4567b3fcbf2b8e2380792ed6615 | [] | no_license | SahilChoudhary22/django-portfolio-blog | 41e7d94cb6fab973d5f25992d46def6f6cb0198b | 30e282224303e5a55de1e253702b4211b938205c | refs/heads/master | 2023-05-01T17:42:11.080673 | 2020-06-10T19:00:27 | 2020-06-10T19:00:27 | 209,122,622 | 0 | 0 | null | 2023-04-21T20:37:23 | 2019-09-17T17:58:23 | HTML | UTF-8 | Python | false | false | 645 | py | # Generated by Django 2.2.5 on 2019-09-17 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Job`` table."""

    # First migration of the app, so it has no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        # Portfolio entry: an image, a short summary and a GitHub link
        # (defaulting to the GitHub landing page).
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images/')),
                ('summary', models.CharField(max_length=200)),
                ('github', models.URLField(default='https://github.com/', max_length=400)),
            ],
        ),
    ]
| [
"sahil.codes.it@gmail.com"
] | sahil.codes.it@gmail.com |
4df7849c6844bd581bb8841111f635cbbab50830 | 4dfd539c530c5cff6874f2fa0c06ffd893212ad3 | /tencentcloud/chdfs/v20201112/errorcodes.py | d4604add29d3d07f8131cc49457ff2038e6d3425 | [] | no_license | TencentCloud/tencentcloud-sdk-python-intl-en | aac605d1a0458b637ba29eb49f6f166fe844a269 | 042b4d7fb609d4d240728197901b46008b35d4b0 | refs/heads/master | 2023-09-01T19:39:27.436454 | 2023-09-01T04:02:15 | 2023-09-01T04:02:15 | 227,834,644 | 4 | 6 | null | 2023-07-17T08:56:56 | 2019-12-13T12:23:52 | Python | UTF-8 | Python | false | false | 3,853 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Error code constants for the CHDFS v20201112 API. Each constant's value is
# the error code string used by the service; the comment above each constant
# describes the error condition.

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# The permission group has been bound.
FAILEDOPERATION_ACCESSGROUPBOUND = 'FailedOperation.AccessGroupBound'

# The account balance is insufficient.
FAILEDOPERATION_ACCOUNTINSUFFICIENTBALANCE = 'FailedOperation.AccountInsufficientBalance'

# The account identity is not verified.
FAILEDOPERATION_ACCOUNTUNAUTHENTICATED = 'FailedOperation.AccountUnauthenticated'

# The file system is not empty.
FAILEDOPERATION_FILESYSTEMNOTEMPTY = 'FailedOperation.FileSystemNotEmpty'

# The file system capacity after change is smaller than the currently used capacity.
FAILEDOPERATION_QUOTALESSTHANCURRENTUSED = 'FailedOperation.QuotaLessThanCurrentUsed'

# Internal error.
INTERNALERROR = 'InternalError'

# Incorrect parameter.
INVALIDPARAMETER = 'InvalidParameter'

# Incorrect parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Incorrect parameter value: AccessGroupId.
INVALIDPARAMETERVALUE_INVALIDACCESSGROUPID = 'InvalidParameterValue.InvalidAccessGroupId'

# Incorrect parameter value: AccessGroupName.
INVALIDPARAMETERVALUE_INVALIDACCESSGROUPNAME = 'InvalidParameterValue.InvalidAccessGroupName'

# Incorrect parameter value: `Address` of the permission rule.
INVALIDPARAMETERVALUE_INVALIDACCESSRULEADDRESS = 'InvalidParameterValue.InvalidAccessRuleAddress'

# Incorrect parameter value: CapacityQuota.
INVALIDPARAMETERVALUE_INVALIDCAPACITYQUOTA = 'InvalidParameterValue.InvalidCapacityQuota'

# Incorrect parameter value: Description.
INVALIDPARAMETERVALUE_INVALIDDESCRIPTION = 'InvalidParameterValue.InvalidDescription'

# Incorrect parameter value: FileSystemId.
INVALIDPARAMETERVALUE_INVALIDFILESYSTEMID = 'InvalidParameterValue.InvalidFileSystemId'

# Incorrect parameter value: FileSystemName.
INVALIDPARAMETERVALUE_INVALIDFILESYSTEMNAME = 'InvalidParameterValue.InvalidFileSystemName'

# Incorrect parameter value: MountPointId.
INVALIDPARAMETERVALUE_INVALIDMOUNTPOINTID = 'InvalidParameterValue.InvalidMountPointId'

# Incorrect parameter value: MountPointName.
INVALIDPARAMETERVALUE_INVALIDMOUNTPOINTNAME = 'InvalidParameterValue.InvalidMountPointName'

# Incorrect parameter value: VpcId.
INVALIDPARAMETERVALUE_INVALIDVPCID = 'InvalidParameterValue.InvalidVpcId'

# The quota limit is exceeded.
LIMITEXCEEDED = 'LimitExceeded'

# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'

# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# The permission group does not exist.
RESOURCENOTFOUND_ACCESSGROUPNOTEXISTS = 'ResourceNotFound.AccessGroupNotExists'

# The permission rule does not exist.
RESOURCENOTFOUND_ACCESSRULENOTEXISTS = 'ResourceNotFound.AccessRuleNotExists'

# The file system does not exist.
RESOURCENOTFOUND_FILESYSTEMNOTEXISTS = 'ResourceNotFound.FileSystemNotExists'

# The mount point does not exist.
RESOURCENOTFOUND_MOUNTPOINTNOTEXISTS = 'ResourceNotFound.MountPointNotExists'

# The VPC does not exist.
RESOURCENOTFOUND_VPCNOTEXISTS = 'ResourceNotFound.VpcNotExists'

# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'

# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
| [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
630ff6a5ad626ea10a5e3ddb440d4b01416a9d3b | 0533d0ceb5966f7327f40d54bbd17e08e13d36bf | /python/LinkedList/Linked List Cycle II/Linked List Cycle II.py | 996a20582aa17746b392099fe2d2bb7ca6441e83 | [] | no_license | danwaterfield/LeetCode-Solution | 0c6178952ca8ca879763a87db958ef98eb9c2c75 | d89ebad5305e4d1a185b0c6f101a88691602b523 | refs/heads/master | 2023-03-19T01:51:49.417877 | 2020-01-11T14:17:42 | 2020-01-11T14:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def detectCycle(self, head):
        """Return the node where the list's cycle begins, or None if acyclic.

        Floyd's tortoise-and-hare: once the two pointers meet inside the
        cycle, a second pointer started from the head meets the slow pointer
        exactly at the cycle's entry node. O(n) time, O(1) space.

        Fix over the original: removed the dead ``step`` and ``index``
        counters (computed but never used) and compare nodes by identity.

        :type head: ListNode
        :rtype: ListNode
        """
        slow = head
        fast = head
        while slow and fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                break
        # Loop ended without a meeting point -> no cycle.
        if not fast or not fast.next:
            return None
        # Advance a fresh pointer from the head; it meets `slow` at the entry.
        probe = head
        while probe is not slow:
            probe = probe.next
            slow = slow.next
        return probe
"zjuzjj@gmail.com"
] | zjuzjj@gmail.com |
b2001f4905ca18d64754a9a6aafb71893fbb0f10 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/Luca.Paterlini/C.py | cd79d12c9e0577d934dba12922fbf43c13a8215c | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,364 | py | import math
def AtkinSieve (limit):
results = [2,3,5]
sieve = [False]*(limit+1)
factor = int(math.sqrt(limit))+1
for i in range(1,factor):
for j in range(1, factor):
n = 4*i**2+j**2
if (n <= limit) and (n % 12 == 1 or n % 12 == 5):
sieve[n] = not sieve[n]
n = 3*i**2+j**2
if (n <= limit) and (n % 12 == 7):
sieve[n] = not sieve[n]
if i>j:
n = 3*i**2-j**2
if (n <= limit) and (n % 12 == 11):
sieve[n] = not sieve[n]
for index in range(5,factor):
if sieve[index]:
for jndex in range(index**2, limit, index**2):
sieve[jndex] = False
for index in range(7,limit):
if sieve[index]:
results.append(index)
return results
def conv_base(s, b, l):
    """Interpret the first ``l`` characters of digit string ``s`` in base ``b``."""
    value = 0
    for i in range(l):
        value = value * b + int(s[i])
    return value
def lowest_div(n, ps):
    """Return the first element of ``ps`` that divides ``n``, or -1 if none does."""
    return next((p for p in ps if n % p == 0), -1)
# Driver script (Python 2: `print` statements and raw_input). Emits J strings
# of length N made of 0/1 digits such that every base-2..10 interpretation has
# a small prime divisor, printing that divisor list alongside each string.
prime_sieve=AtkinSieve(10**6)
# Consume the first input line (the test-case count; py2 input() evaluates it).
input()
N,J=map(int,raw_input().split())
u=0
print "Case #1:"
while J>0:
    u+=1
    # Candidate: '1' + zero-padded binary payload + '1', so the string starts
    # and ends with 1 in every base interpretation.
    q=bin(u)[2:]
    s='1'+'0'*(N-2-len(q))+q+'1'
    v=[]
    # Interpret the candidate in each base from 2 to 10.
    for c in xrange(2,11): v.append(conv_base(s,c,N))
    # Find a prime divisor for each interpretation (-1 if none in the sieve).
    v=[lowest_div(x,prime_sieve) for x in v]
    if all(i>0 for i in v):
        print s,' '.join([str(x) for x in v]);J-=1
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
49c3b03d19632d9b4da234da479e74307f3bdbf9 | 1f8540dc62cfe27a9f6b0da5aefd40e814fb4b57 | /notifier/main/validators.py | 8d3024794fa1edbe35594d3a12a4af95ac640f4b | [] | no_license | fmarchenko/mailganer-test | 7506d349601189eed37d356dc06a053d5955931f | 3316f7769fac6096194b9eaf57cf7e54753de266 | refs/heads/master | 2022-07-25T09:47:37.068088 | 2019-06-10T19:03:39 | 2019-06-10T19:03:39 | 191,227,905 | 0 | 0 | null | 2022-07-06T20:10:40 | 2019-06-10T18:54:35 | Python | UTF-8 | Python | false | false | 1,215 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import lxml.etree
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.template.loader import render_to_string
from django.core.exceptions import ValidationError
from django.template.exceptions import TemplateSyntaxError, TemplateDoesNotExist
from .utils import render_template_from_string
__author__ = "Fedor Marchenko"
__email__ = "mfs90@mail.ru"
__date__ = "Jun 10, 2019"
def validate_template(value):
    """Validate that ``value`` renders as a Django template string."""
    try:
        render_template_from_string(value)
    except (TypeError, TemplateSyntaxError):
        raise ValidationError('Template syntax error')
def validate_template_file(template_name):
    """Validate that the named template exists, renders, and is valid HTML."""
    try:
        validate_html(render_to_string(template_name))
    except TemplateDoesNotExist:
        raise ValidationError('Template does not exist')
    except (TypeError, TemplateSyntaxError):
        raise ValidationError('Template syntax error')
def validate_html(value):
    """Validate that ``value`` parses as HTML with a strict (non-recovering) parser."""
    strict_parser = lxml.etree.HTMLParser(recover=False)
    try:
        lxml.etree.parse(StringIO(value), strict_parser)
    except lxml.etree.XMLSyntaxError:
        raise ValidationError('Html syntax error')
| [
"mfs90@mail.ru"
] | mfs90@mail.ru |
33192420f4a034f07c13792859bfea745d17c1d1 | 55c6489732dc42ae760a51e154db1d66c6a6905e | /step2/step2_test.py | 7b03cefd58c467c2d03c8879212cbf42ccb5f029 | [] | no_license | keikosanjo/http_server | 85a9ea7269fad5a29ed331fca42d731c0bf4c04e | 0a27a734f044113a991ef2c4701197ebe38cc9bc | refs/heads/master | 2020-03-08T14:57:35.349501 | 2018-04-28T17:30:08 | 2018-04-28T17:30:08 | 128,198,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import unittest
from lucky_server_method import Method
class TestMethod(unittest.TestCase):
    """Unit tests for ``Method.get`` in lucky_server_method.py."""

    def test_get_statusline(self):
        """The response begins with an HTTP/1.0 200 status line."""
        print("test case1 : hello")
        lines = Method("hello").get().splitlines()
        self.assertEqual("HTTP/1.0 200 OK", lines[0])

    def test_get_header(self):
        """The second response line is the Content-Type header."""
        print("test case2: hogehoge")
        lines = Method("hogehoge").get().splitlines()
        self.assertEqual("Content-Type: text/html; charset=UTF-8", lines[1])

    def test_get_msg(self):
        """Line index 6 of the response is populated (not None)."""
        print("test case3: hello world!")
        lines = Method("hello world!").get().splitlines()
        self.assertNotEqual(None, lines[6])
if __name__ == "__main__":
unittest.main()
| [
"sanjo.keiko@fujitsu.com"
] | sanjo.keiko@fujitsu.com |
cdc1cfa4a1eb481d4e817b068541248211b778f2 | 1098e955e15dd259c9e92d653f2b489a702a5d96 | /catalog/migrations/0001_initial.py | cb8c480f565ff7f4f0b7637cb87174e21c4bc49e | [
"Apache-2.0"
] | permissive | ashik4715/thanosback | 40c320c95cdcb0cbd7823b76b3bc206585b35684 | 08db204dbda2672dd5a53c577c12899f39e73af0 | refs/heads/master | 2023-06-05T23:27:52.016748 | 2021-06-24T19:05:36 | 2021-06-24T19:05:36 | 380,023,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # Generated by Django 3.0.1 on 2021-06-21 19:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Catalog`` table."""

    # First migration of the app, so it has no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Catalog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('genre', models.CharField(max_length=1000)),
                ('bio', models.CharField(max_length=2500)),
                ('release_date', models.DateField(verbose_name='date released')),
                # NOTE(review): Max/MinLengthValidator call len() on the value,
                # which raises TypeError for an IntegerField. The intent looks
                # like a 1..100 range, i.e. MinValueValidator/MaxValueValidator.
                # Fix it in the model and regenerate, rather than hand-editing
                # an applied migration (Django would detect the divergence).
                ('review', models.IntegerField(default=0, validators=[django.core.validators.MaxLengthValidator(100), django.core.validators.MinLengthValidator(1)])),
            ],
        ),
    ]
| [
"ashikurjhalak@gmail.com"
] | ashikurjhalak@gmail.com |
5aab9762701d196ef905a7515cb2ae35400008cc | ebf3c9a10afb7199c75f6abd92cd65a12b06ebab | /net.py | 2aab45bed79d2be20622121ac5dd31d48c504ad8 | [] | no_license | bianjiang1234567/GPND | 0d887cab9876494efb73fdf107cecfa5e18c86cc | ce8b0cd7a3c707b87cb73745dcb50ea181a21604 | refs/heads/master | 2021-09-29T02:09:06.869422 | 2018-10-18T12:44:25 | 2018-10-18T12:44:25 | 159,532,954 | 1 | 0 | null | 2018-11-28T16:36:30 | 2018-11-28T16:36:30 | null | UTF-8 | Python | false | false | 6,411 | py |
import torch
from torch import nn
from torch.nn import functional as F
class VAE(nn.Module):
    """Convolutional VAE for 32x32 single-channel images.

    The encoder maps an image to the mean and log-variance of a
    ``zsize``-dimensional Gaussian posterior; the decoder maps a latent
    sample back to image space, with outputs rescaled into [0, 1].
    """

    def __init__(self, zsize):
        super(VAE, self).__init__()
        d = 128  # base channel multiplier
        self.zsize = zsize

        # Decoder: z (zsize x 1 x 1) -> image (1 x 32 x 32).
        self.deconv1 = nn.ConvTranspose2d(zsize, d * 2, 4, 1, 0)
        self.deconv1_bn = nn.BatchNorm2d(d * 2)
        self.deconv2 = nn.ConvTranspose2d(d * 2, d * 2, 4, 2, 1)
        self.deconv2_bn = nn.BatchNorm2d(d * 2)
        self.deconv3 = nn.ConvTranspose2d(d * 2, d, 4, 2, 1)
        self.deconv3_bn = nn.BatchNorm2d(d)
        self.deconv4 = nn.ConvTranspose2d(d, 1, 4, 2, 1)

        # Encoder: image (1 x 32 x 32) -> two (zsize x 1 x 1) heads (mu, logvar).
        self.conv1 = nn.Conv2d(1, d // 2, 4, 2, 1)
        self.conv2 = nn.Conv2d(d // 2, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4_1 = nn.Conv2d(d * 4, zsize, 4, 1, 0)
        self.conv4_2 = nn.Conv2d(d * 4, zsize, 4, 1, 0)

    def encode(self, x):
        """Return (mu, logvar) feature maps of shape (N, zsize, 1, 1).

        BUGFIX: the original called F.relu(x, 0.2); F.relu's second argument
        is ``inplace``, so the 0.2 silently meant inplace=True with a plain
        ReLU. The 0.2 slope, used consistently with F.leaky_relu in the
        Discriminator/Encoder classes of this module, shows leaky ReLU was
        intended.
        """
        x = F.leaky_relu(self.conv1(x), 0.2)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
        return self.conv4_1(x), self.conv4_2(x)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) during training; return mu at eval."""
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        """Decode a latent code into an image with values in [0, 1]."""
        x = z.view(-1, self.zsize, 1, 1)
        x = F.relu(self.deconv1_bn(self.deconv1(x)))
        x = F.relu(self.deconv2_bn(self.deconv2(x)))
        x = F.relu(self.deconv3_bn(self.deconv3(x)))
        # tanh in [-1, 1] rescaled to [0, 1]; torch.tanh replaces deprecated F.tanh.
        x = torch.tanh(self.deconv4(x)) * 0.5 + 0.5
        return x

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for a batch of images."""
        mu, logvar = self.encode(x)
        mu = mu.squeeze()
        logvar = logvar.squeeze()
        z = self.reparameterize(mu, logvar)
        return self.decode(z.view(-1, self.zsize, 1, 1)), mu, logvar

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for m in self._modules:
            normal_init(self._modules[m], mean, std)
class Generator(nn.Module):
    """DCGAN-style generator: latent (z_size x 1 x 1) -> image in [0, 1]."""

    def __init__(self, z_size, d=128, channels=1):
        super(Generator, self).__init__()
        self.deconv1_1 = nn.ConvTranspose2d(z_size, d * 2, 4, 1, 0)
        self.deconv1_1_bn = nn.BatchNorm2d(d * 2)
        # Label branch: unused by forward(), kept so checkpoints keep loading.
        self.deconv1_2 = nn.ConvTranspose2d(10, d * 2, 4, 1, 0)
        self.deconv1_2_bn = nn.BatchNorm2d(d * 2)
        self.deconv2 = nn.ConvTranspose2d(d * 2, d * 2, 4, 2, 1)
        self.deconv2_bn = nn.BatchNorm2d(d * 2)
        self.deconv3 = nn.ConvTranspose2d(d * 2, d, 4, 2, 1)
        self.deconv3_bn = nn.BatchNorm2d(d)
        self.deconv4 = nn.ConvTranspose2d(d, channels, 4, 2, 1)

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for layer in self._modules.values():
            normal_init(layer, mean, std)

    def forward(self, input):
        h = F.relu(self.deconv1_1_bn(self.deconv1_1(input)))
        h = F.relu(self.deconv2_bn(self.deconv2(h)))
        h = F.relu(self.deconv3_bn(self.deconv3(h)))
        # tanh in [-1, 1] rescaled to [0, 1]
        return F.tanh(self.deconv4(h)) * 0.5 + 0.5
class Discriminator(nn.Module):
    """DCGAN-style discriminator: image -> sigmoid realness score (N x 1 x 1 x 1)."""

    def __init__(self, d=128, channels=1):
        super(Discriminator, self).__init__()
        self.conv1_1 = nn.Conv2d(channels, d // 2, 4, 2, 1)
        self.conv2 = nn.Conv2d(d // 2, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4 = nn.Conv2d(d * 4, 1, 4, 1, 0)

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for layer in self._modules.values():
            normal_init(layer, mean, std)

    def forward(self, input):
        h = F.leaky_relu(self.conv1_1(input), 0.2)
        h = F.leaky_relu(self.conv2_bn(self.conv2(h)), 0.2)
        h = F.leaky_relu(self.conv3_bn(self.conv3(h)), 0.2)
        return F.sigmoid(self.conv4(h))
class Encoder(nn.Module):
    """Convolutional encoder: image -> latent code of shape (N, z_size, 1, 1)."""

    def __init__(self, z_size, d=128, channels=1):
        super(Encoder, self).__init__()
        self.conv1_1 = nn.Conv2d(channels, d // 2, 4, 2, 1)
        self.conv2 = nn.Conv2d(d // 2, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4 = nn.Conv2d(d * 4, z_size, 4, 1, 0)

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for layer in self._modules.values():
            normal_init(layer, mean, std)

    def forward(self, input):
        h = F.leaky_relu(self.conv1_1(input), 0.2)
        h = F.leaky_relu(self.conv2_bn(self.conv2(h)), 0.2)
        h = F.leaky_relu(self.conv3_bn(self.conv3(h)), 0.2)
        return self.conv4(h)
class ZDiscriminator(nn.Module):
    """MLP discriminator over individual latent codes.

    ``batchSize`` is accepted for signature parity with
    ZDiscriminator_mergebatch but is not used here.
    """

    def __init__(self, z_size, batchSize, d=128):
        super(ZDiscriminator, self).__init__()
        self.linear1 = nn.Linear(z_size, d)
        self.linear2 = nn.Linear(d, d)
        self.linear3 = nn.Linear(d, 1)

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for layer in self._modules.values():
            normal_init(layer, mean, std)

    def forward(self, x):
        h = F.leaky_relu(self.linear1(x), 0.2)
        h = F.leaky_relu(self.linear2(h), 0.2)
        return F.sigmoid(self.linear3(h))
class ZDiscriminator_mergebatch(nn.Module):
    """MLP discriminator that judges a whole batch of latent codes at once.

    After the first layer the per-sample features are flattened into a single
    row, so the input batch dimension must equal ``batchSize``.
    """

    def __init__(self, z_size, batchSize, d=128):
        super(ZDiscriminator_mergebatch, self).__init__()
        self.linear1 = nn.Linear(z_size, d)
        self.linear2 = nn.Linear(d * batchSize, d)
        self.linear3 = nn.Linear(d, 1)

    def weight_init(self, mean, std):
        """Apply the module-level normal_init to every submodule."""
        for layer in self._modules.values():
            normal_init(layer, mean, std)

    def forward(self, x):
        # Concatenate every sample's features into one row after the first layer.
        h = F.leaky_relu(self.linear1(x), 0.2).view(1, -1)
        h = F.leaky_relu(self.linear2(h), 0.2)
        return F.sigmoid(self.linear3(h))
def normal_init(m, mean, std):
    """Init conv/deconv/linear layers in place: N(mean, std) weights, zero bias.

    Modules of any other type are left untouched.
    """
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d, nn.Linear)):
        m.weight.data.normal_(mean, std)
        m.bias.data.zero_()
| [
"erik.kj.kratz@gmail.com"
] | erik.kj.kratz@gmail.com |
873f399a3fc2fb55ed3c9320f9bdce8d298bc065 | 474e74c654916d0a1b0311fc80eff206968539b1 | /venv/Lib/site-packages/asposewordscloud/models/paragraph_link_collection_response.py | f18fa21cf6270818d46552834022303a45595eff | [] | no_license | viktor-tchemodanov/Training_Tasks_Python_Cloud | 4592cf61c2f017b314a009c135340b18fa23fc8f | b7e6afab4e9b76bc817ef216f12d2088447bd4cd | refs/heads/master | 2020-09-04T10:39:23.023363 | 2019-11-05T10:36:45 | 2019-11-05T10:36:45 | 219,712,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,084 | py | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="ParagraphLinkCollectionResponse.py">
# Copyright (c) 2018 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class ParagraphLinkCollectionResponse(object):
    """This response should be returned by the service when handling: GET http://api.aspose.com/v1.1/words/Test.doc/paragraphs
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'code': 'int',
        'status': 'str',
        'paragraphs': 'ParagraphLinkCollection'
    }

    attribute_map = {
        'code': 'Code',
        'status': 'Status',
        'paragraphs': 'Paragraphs'
    }

    def __init__(self, code=None, status=None, paragraphs=None):  # noqa: E501
        """ParagraphLinkCollectionResponse - a model defined in Swagger"""  # noqa: E501

        self._code = None
        self._status = None
        self._paragraphs = None
        self.discriminator = None

        if code is not None:
            self.code = code
        if status is not None:
            self.status = status
        if paragraphs is not None:
            self.paragraphs = paragraphs

    @property
    def code(self):
        """Gets the code of this ParagraphLinkCollectionResponse.  # noqa: E501

        Response status code.  # noqa: E501

        :return: The code of this ParagraphLinkCollectionResponse.  # noqa: E501
        :rtype: int
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this ParagraphLinkCollectionResponse.

        Response status code.  # noqa: E501

        :param code: The code of this ParagraphLinkCollectionResponse.  # noqa: E501
        :type: int
        """
        if code is None:
            raise ValueError("Invalid value for `code`, must not be `None`")  # noqa: E501
        self._code = code

    @property
    def status(self):
        """Gets the status of this ParagraphLinkCollectionResponse.  # noqa: E501

        Response status.  # noqa: E501

        :return: The status of this ParagraphLinkCollectionResponse.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ParagraphLinkCollectionResponse.

        Response status.  # noqa: E501

        :param status: The status of this ParagraphLinkCollectionResponse.  # noqa: E501
        :type: str
        """
        self._status = status

    @property
    def paragraphs(self):
        """Gets the paragraphs of this ParagraphLinkCollectionResponse.  # noqa: E501

        Collection of paragraphs  # noqa: E501

        :return: The paragraphs of this ParagraphLinkCollectionResponse.  # noqa: E501
        :rtype: ParagraphLinkCollection
        """
        return self._paragraphs

    @paragraphs.setter
    def paragraphs(self, paragraphs):
        """Sets the paragraphs of this ParagraphLinkCollectionResponse.

        Collection of paragraphs  # noqa: E501

        :param paragraphs: The paragraphs of this ParagraphLinkCollectionResponse.  # noqa: E501
        :type: ParagraphLinkCollection
        """
        self._paragraphs = paragraphs

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # dict.items() replaces six.iteritems: on Python 3 they are
        # equivalent, and this removes the block's only use of the `six`
        # compatibility shim.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ParagraphLinkCollectionResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"vtchemodanov@hotmail.com"
] | vtchemodanov@hotmail.com |
b7e6ccbf91282fd4b1135b33210324ead1541bbf | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577619_user_and_root_directory_logfile/recipe-577619.py | a1ea4b4ab355197464452fb26ca1eb8516cd6dac | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 339 | py | #! usr/bin/python
import dircache
import getpass
import time
# Log the current time, user name and root-directory listing to spam.txt.
# Python 2 script: uses the `print >>` chevron syntax and the `dircache`
# module (both removed in Python 3).
logfile = open("spam.txt", "w+")
localtime = time.asctime( time.localtime(time.time()) )
print >> logfile, 'local current time :', localtime
usr = getpass.getuser()
print >> logfile, 'current user :' + usr
# dircache.listdir returns a (cached) listing of the filesystem root.
lst = dircache.listdir('/')
print >> logfile, lst
logfile.close()
| [
"betty@qburst.com"
] | betty@qburst.com |
3e237a3b618f6babfcc45fed3d29a91f5c1caf5e | 5cb9dccbcccb8a2137368dd0615fe3e3c7761707 | /simulations/kinova/build/moveit_ros_visualization/catkin_generated/pkg.installspace.context.pc.py | be8bbf82db3fed001402ea244273aafcf024b20f | [] | no_license | Simon-Steinmann/sim2real-modular-RL-project | b2467a393014e106043f6128a026f5eac934a83d | 4027590ac94de2d5c914731c09efcf2f318b9ca3 | refs/heads/master | 2020-07-29T01:30:56.450919 | 2019-10-12T09:33:00 | 2019-10-12T09:33:00 | 209,605,548 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# pkg-config context values for the moveit_ros_visualization catkin package
# (generated from catkin's pkg.context.pc.in template; see header above).
# The `X.split(';') if X != "" else []` pattern converts a CMake
# semicolon-separated list into a Python list, yielding [] when empty.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/acis/sim2real/simulations/kinova/install/include;/usr/include/eigen3".split(';') if "/home/acis/sim2real/simulations/kinova/install/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "moveit_ros_planning_interface;moveit_ros_robot_interaction;object_recognition_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmoveit_motion_planning_rviz_plugin_core;-lmoveit_planning_scene_rviz_plugin_core;-lmoveit_robot_state_rviz_plugin_core;-lmoveit_rviz_plugin_render_tools;-lmoveit_trajectory_rviz_plugin_core".split(';') if "-lmoveit_motion_planning_rviz_plugin_core;-lmoveit_planning_scene_rviz_plugin_core;-lmoveit_robot_state_rviz_plugin_core;-lmoveit_rviz_plugin_render_tools;-lmoveit_trajectory_rviz_plugin_core" != "" else []
PROJECT_NAME = "moveit_ros_visualization"
PROJECT_SPACE_DIR = "/home/acis/sim2real/simulations/kinova/install"
PROJECT_VERSION = "1.0.1"
| [
"simon.steinmann91@gmail.com"
] | simon.steinmann91@gmail.com |
f61dd5f504fce6b9b5c5368af402735f80c34ca2 | 7d85c42e99e8009f63eade5aa54979abbbe4c350 | /game/lib/coginvasion/distributed/PlayGame.py | 82e93f8b2d73561ef231f29acb3acbd8bdb2e18f | [] | no_license | ToontownServerArchive/Cog-Invasion-Online-Alpha | 19c0454da87e47f864c0a5cb8c6835bca6923f0e | 40498d115ed716f1dec12cf40144015c806cc21f | refs/heads/master | 2023-03-25T08:49:40.878384 | 2016-07-05T07:09:36 | 2016-07-05T07:09:36 | 348,172,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,364 | py | # Filename: PlayGame.py
# Created by: blach (28Nov14)
from lib.coginvasion.globals import CIGlobals
from lib.coginvasion.distributed.CogInvasionMsgTypes import *
from direct.fsm.ClassicFSM import ClassicFSM
from direct.fsm.State import State
from direct.fsm.StateData import StateData
from direct.directnotify.DirectNotifyGlobal import directNotify
from lib.coginvasion.hood import ZoneUtil
from lib.coginvasion.hood import TTHood
from lib.coginvasion.hood import MGHood
from lib.coginvasion.hood import BRHood
from lib.coginvasion.hood import DLHood
from lib.coginvasion.hood import MLHood
from lib.coginvasion.hood import DGHood
from lib.coginvasion.hood import DDHood
from lib.coginvasion.hood import CTCHood
from lib.coginvasion.hood.QuietZoneState import QuietZoneState
from lib.coginvasion.dna.DNALoader import *
from panda3d.core import *
class PlayGame(StateData):
notify = directNotify.newCategory('PlayGame')
Hood2HoodClass = {CIGlobals.ToontownCentral: TTHood.TTHood,
CIGlobals.MinigameArea: MGHood.MGHood,
CIGlobals.TheBrrrgh: BRHood.BRHood,
CIGlobals.DonaldsDreamland: DLHood.DLHood,
CIGlobals.MinniesMelodyland: MLHood.MLHood,
CIGlobals.DaisyGardens: DGHood.DGHood,
CIGlobals.DonaldsDock: DDHood.DDHood,
CIGlobals.BattleTTC: CTCHood.CTCHood}
Hood2HoodState = {CIGlobals.ToontownCentral: 'TTHood',
CIGlobals.MinigameArea: 'MGHood',
CIGlobals.TheBrrrgh: 'BRHood',
CIGlobals.DonaldsDreamland: 'DLHood',
CIGlobals.MinniesMelodyland: 'MLHood',
CIGlobals.DaisyGardens: 'DGHood',
CIGlobals.DonaldsDock: 'DDHood',
CIGlobals.BattleTTC: 'CTCHood'}
def __init__(self, parentFSM, doneEvent):
StateData.__init__(self, "playGameDone")
self.doneEvent = doneEvent
self.fsm = ClassicFSM('World', [State('off', self.enterOff, self.exitOff, ['quietZone']),
State('quietZone', self.enterQuietZone, self.exitQuietZone, ['TTHood',
'BRHood', 'DLHood', 'MLHood', 'DGHood', 'DDHood', 'MGHood', 'CTCHood']),
State('TTHood', self.enterTTHood, self.exitTTHood, ['quietZone']),
State('BRHood', self.enterBRHood, self.exitBRHood, ['quietZone']),
State('DLHood', self.enterDLHood, self.exitDLHood, ['quietZone']),
State('MLHood', self.enterMLHood, self.exitMLHood, ['quietZone']),
State('DGHood', self.enterDGHood, self.exitDGHood, ['quietZone']),
State('DDHood', self.enterDDHood, self.exitDDHood, ['quietZone']),
State('MGHood', self.enterMGHood, self.exitMGHood, ['quietZone']),
State('CTCHood', self.enterCTCHood, self.exitCTCHood, ['quietZone'])],
'off', 'off')
self.fsm.enterInitialState()
self.parentFSM = parentFSM
self.parentFSM.getStateNamed('playGame').addChild(self.fsm)
self.hoodDoneEvent = 'hoodDone'
self.hood = None
self.quietZoneDoneEvent = uniqueName('quietZoneDone')
self.quietZoneStateData = None
self.place = None
self.lastHood = None
self.suitManager = None
def enter(self, hoodId, zoneId, avId):
StateData.enter(self)
whereName = ZoneUtil.getWhereName(zoneId)
loaderName = ZoneUtil.getLoaderName(zoneId)
self.fsm.request('quietZone', [{'zoneId': zoneId,
'hoodId': hoodId,
'where': whereName,
'how': 'teleportIn',
'avId': avId,
'shardId': None,
'loader': loaderName}])
def exit(self):
StateData.exit(self)
def getCurrentWorldName(self):
return self.fsm.getCurrentState().getName()
def enterOff(self):
pass
def exitOff(self):
pass
def enterCTCHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitCTCHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.ToontownCentral
def enterDDHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitDDHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.DonaldsDock
def enterDGHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitDGHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.DaisyGardens
def enterMLHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitMLHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.MinniesMelodyland
def enterDLHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitDLHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.DonaldsDreamland
def enterBRHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitBRHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.TheBrrrgh
def enterTTHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitTTHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.ToontownCentral
def enterMGHood(self, requestStatus):
self.accept(self.hoodDoneEvent, self.handleHoodDone)
self.hood.enter(requestStatus)
def exitMGHood(self):
self.ignore(self.hoodDoneEvent)
self.hood.exit()
self.hood.unload()
self.hood = None
self.lastHood = CIGlobals.MinigameArea
def handleHoodDone(self):
doneStatus = self.hood.getDoneStatus()
if doneStatus['zoneId'] == None:
self.doneStatus = doneStatus
messenger.send(self.doneEvent)
else:
self.fsm.request('quietZone', [doneStatus])
def loadDNAStore(self):
if hasattr(self, 'dnaStore'):
self.dnaStore.reset_nodes()
self.dnaStore.reset_hood_nodes()
self.dnaStore.reset_place_nodes()
self.dnaStore.reset_hood()
self.dnaStore.reset_fonts()
self.dnaStore.reset_DNA_vis_groups()
self.dnaStore.reset_textures()
self.dnaStore.reset_block_numbers()
self.dnaStore.reset_block_zones()
self.dnaStore.reset_suit_points()
del self.dnaStore
self.dnaStore = DNAStorage()
loadDNAFile(self.dnaStore, 'phase_4/dna/storage.pdna')
self.dnaStore.storeFont('humanist', CIGlobals.getToonFont())
self.dnaStore.storeFont('mickey', CIGlobals.getMickeyFont())
self.dnaStore.storeFont('suit', CIGlobals.getSuitFont())
loadDNAFile(self.dnaStore, 'phase_3.5/dna/storage_interior.pdna')
def enterQuietZone(self, requestStatus):
self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone, [requestStatus])
self.acceptOnce('enteredQuietZone', self.handleEnteredQuietZone, [requestStatus])
self.quietZoneStateData = QuietZoneState(self.quietZoneDoneEvent, 0)
self.quietZoneStateData.load()
self.quietZoneStateData.enter(requestStatus)
def handleEnteredQuietZone(self, requestStatus):
hoodId = requestStatus['hoodId']
hoodClass = self.Hood2HoodClass[hoodId]
base.transitions.noTransitions()
loader.beginBulkLoad('hood', hoodId, 100)
self.loadDNAStore()
self.hood = hoodClass(self.fsm, self.hoodDoneEvent, self.dnaStore, hoodId)
self.hood.load()
hoodId = requestStatus['hoodId']
hoodState = self.Hood2HoodState[hoodId]
self.fsm.request(hoodState, [requestStatus], exitCurrent = 0)
self.quietZoneStateData.fsm.request('waitForSetZoneResponse')
def handleQuietZoneDone(self, requestStatus):
self.hood.enterTheLoader(requestStatus)
self.hood.loader.enterThePlace(requestStatus)
loader.endBulkLoad('hood')
self.exitQuietZone()
def exitQuietZone(self):
self.ignore('enteredQuietZone')
self.ignore(self.quietZoneDoneEvent)
self.quietZoneStateData.exit()
self.quietZoneStateData.unload()
self.quietZoneStateData = None
def setPlace(self, place):
self.place = place
def getPlace(self):
return self.place
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
ac6de3a9cebc01573b58f7b063a5ec27a52ba20c | c16d228002320d9cbffb888707799c6fd9733f5e | /comparer/common/session.py | b1a40dc0487a74658e19932f5ce6bee948c6d0ff | [] | no_license | vprusa/MediaWiki-to-DokuWiki-Importer | 45648ebc798ee5e4c1ae31b116fc01e2b04fd385 | 52f6a21de91e9eea4b01c04228b0bedbb58743ba | refs/heads/master | 2022-11-15T08:12:56.860237 | 2020-07-15T11:52:12 | 2020-07-15T11:52:12 | 279,854,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | from common.base_login import base_login
from conf.properties import properties
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.common.exceptions import StaleElementReferenceException
from common.ui_utils import ui_utils
import os
import logging
import logging.config
from selenium.webdriver.firefox.options import Options
class session(properties):
web_driver = None
logger = None
logging_level = logging.DEBUG
ui_utils = None
def __init__(self):
# call parent method to load properties from files
super(session, self).__init__()
self.__logger__()
self.logger.info("Test properties")
self.logger.info("Debug: " + self.DEBUG)
self.logger.info("PROPERTIES_FILE_NAME: " + self.PROPERTIES_FILE_NAME)
''' Get the Selenium Web Driver, and then navegate to ...'''
self.__get_web_driver__()
self.ui_utils = ui_utils(self)
def __get_web_driver__(self):
#self.logger.info(self.BROWSER)
self.logger.info("Using Browser: %s", self.BROWSER)
if "Firefox" in self.BROWSER:
profile = webdriver.FirefoxProfile()
# enable auto download
# http://stackoverflow.com/questions/24852709/how-do-i-automatically-download-files-from-a-pop-up-dialog-using-selenium-python
profile.set_preference("browser.download.folderList", 2)
profile.set_preference("browser.download.manager.showWhenStarting", False)
profile.set_preference("browser.download.dir", self.FIREFOX_DOWNLOAD_DIR)
profile.set_preference("browser.download.panel.shown", False)
#profile.set_preference("browser.helperApps.neverAsk.openFile","text/csv,application/vnd.ms-excel,text/plain, application/xls,application/x-xls,application/excel,application/x-excel,application/vnd.ms-excel,application/x-excel,application/x-msexcel")
#profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/csv,application/vnd.ms-excel,text/plain, application/xls,application/x-xls,application/excel,application/x-excel,application/vnd.ms-excel,application/x-excel,application/x-msexcel")
profile.accept_untrusted_certs = True
options = Options()
if "True" in self.HEADLESS:
options.add_argument("--headless")
binary = FirefoxBinary('./firefox/firefox')
#driver = webdriver.Firefox(firefox_binary=binary)
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
profile.set_preference("webdriver_accept_untrusted_certs", True)
desired_caps = DesiredCapabilities.FIREFOX.copy()
desired_caps.update({'acceptInsecureCerts': True, 'acceptSslCerts': True})
driver = webdriver.Firefox(capabilities=desired_caps, firefox_options=options, firefox_profile=profile, executable_path="./geckodriver", firefox_binary=binary)
#self.web_driver = getattr(webdriver,self.BROWSER)(firefox_profile=profile)
self.web_driver = driver
if "Chrome" in self.BROWSER:
driver = webdriver.Chrome(self.DRIVER_LOCATION) # Optional argument, if not specified will search path.
self.web_driver = driver
#self.web_driver.set_window_size(self.BROWSER_WIDTH, self.BROWSER_HEIGHT)
return
def __logger__(self):
self.logger = logging.getLogger('session')
if len(self.logger.handlers[:]) == 0:
self.logger.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
ch = logging.StreamHandler()
ch.setLevel(self.logging_level)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.logger.info("Logger Handler created.")
else:
self.logger.info("Logger Handler already created.")
def propertiesByPrefix(self, prefix):
return [x for x in dir(self) if prefix in x ]
def close_web_driver(self):
if not "True" in self.KEEP_BROWSER_RUNNING:
# close browser window
# self.web_driver.close()
# close browser windows & exit webdriver
self.web_driver.quit()
def find_elements_by_xpath(self, xpath, retry = 10):
try:
return self.web_driver.find_elements_by_xpath(xpath)
except StaleElementReferenceException as e:
if retry <= 0:
raise StaleElementReferenceException()
return self.find_elements_by_xpath(xpath, retry-1)
return None
def find_element_by_xpath(self, xpath, retry = 10):
elems = self.find_elements_by_xpath(xpath, retry)
if not elems or len(elems) < 1:
return None
return elems[0]
def getattr(self, item, attr):
try:
ret = getattr(item, attr)
return ret
except AttributeError:
return ""
pass
| [
"prusa.vojtech@gmail.com"
] | prusa.vojtech@gmail.com |
a3d446308f50cb599ebfcc1c2196f6b228cae9ec | 9b7d309765c29be70044ade582f92c96abcc97e1 | /turnip/admin.py | 869ae73617c242f7b216e78d84de861ce8781210 | [] | no_license | Tholdan/animal_crossing_turnip | de6f3775bad37c565dce915c723bcee909aff8d1 | c78db10bfe154ff84d4891a54f9e2002ce61a5d1 | refs/heads/master | 2021-04-17T03:43:59.618984 | 2020-03-23T11:13:30 | 2020-03-23T11:13:30 | 249,409,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.contrib import admin
from turnip import models
admin.site.register(models.TurnipWeek)
admin.site.register(models.TurnipDailyCost)
| [
"ivanort97@gmail.com"
] | ivanort97@gmail.com |
049dbc9839f445afb09c852ed69a14d82a04e27b | 5d2c59ccd6fb387ac9e76d1530b27541c07a0393 | /backend/odas_backend/odas/serializers.py | 6dfed759e1bc2404795ca3783fd32dd74d9767cb | [] | no_license | maxorozco213/Operations-Data-and-Mgmt-System | 167c7cc35730b3af21b71a4c699c1dfc340acb5a | 1fc6e8a076886dad5732210f91873f6a09485221 | refs/heads/master | 2022-05-27T02:41:14.970986 | 2020-04-27T21:03:45 | 2020-04-27T21:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from rest_framework import serializers
from odas.models import Satellite, Component, Measurement, Units
# Lead Serializer
class SatelliteSerializer(serializers.ModelSerializer):
class Meta:
model = Satellite
fields = '__all__'
class ComponentSerializer(serializers.ModelSerializer):
class Meta:
model = Component
fields = '__all__'
class UnitsSerializer(serializers.ModelSerializer):
class Meta:
model = Units
fields = '__all__'
class MeasurementSerializer(serializers.ModelSerializer):
class Meta:
model = Measurement
fields = '__all__' | [
"jesus_r_mendoza@yahoo.com"
] | jesus_r_mendoza@yahoo.com |
f6fbc5a8b7742f6f45223f0e0e96d9c2a7b32065 | 85bd5522f5bc58fe0b6a93c93f31ce2b09fa9e10 | /Pi_monteCarlo.py | fbe75639eadb35e492ff9d4ce4a7a3b92b770dac | [] | no_license | jeremlp/Pi-approximation-Monte-Carlo | f72ff70adbf38c9c1954e10f70976c48a6e3c471 | 843c15df66e4badea38db1d16ebebdea7575fe06 | refs/heads/master | 2022-04-20T21:59:27.227547 | 2020-04-17T14:40:59 | 2020-04-17T14:40:59 | 256,528,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 02:40:37 2020
@author: Jeremy La Porte
Release V1.0
Pi approximation using Monte Carlo method
"""
from random import *
import matplotlib.pyplot as plt
data = 1000000
R=0
for i in range(data):
x,y = random(),random()
plt.scatter(x,y,c='r',s=0.4)
if x**2 + y**2 <= 1:
R= R+1
plt.scatter(x,y,c='b',s=0.4)
print('moy', 4*R/data)
| [
"noreply@github.com"
] | noreply@github.com |
f80430b48ec9e0b71e51fbfed5dd8c8bcdabbbe4 | 42e8c0992fd845237fa7b1baef494bfb6abc9dba | /ui/data_input_panel.py | 7dd2b4764971de4b2bd9fc109be70c082724291f | [] | no_license | mx1001/animation_nodes | b5ae336512bb43f40e6ca5276a4e05acb5fdc81b | b77b96d991f2b26c03bcbeef4a9fa8a09173ea4f | refs/heads/master | 2020-02-26T17:46:05.676451 | 2016-03-09T15:22:01 | 2016-03-09T15:22:01 | 54,067,761 | 5 | 0 | null | 2016-03-16T21:27:54 | 2016-03-16T21:27:54 | null | UTF-8 | Python | false | false | 554 | py | import bpy
from .. tree_info import getNodesByType
class DataInputPanel(bpy.types.Panel):
bl_idname = "an_data_input_panel"
bl_label = "Data Input"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "AN"
def draw(self, context):
layout = self.layout
nodes = getNodesByType("an_DataInputNode")
for node in nodes:
if not node.showInViewport: continue
socket = node.inputs[0]
socket.drawSocket(layout, text = node.label, drawType = "TEXT_PROPERTY_OR_NONE")
| [
"mail@jlucke.com"
] | mail@jlucke.com |
052c2a2cb51a4e27408d96c8675bf650c28a11d6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /3pzKWEg5oiaMemDdP_20.py | 8a2a185b2383fd368b4c800327192066c7c46a25 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
def most_expensive_item(products):
Things = []
for x in products.keys():
Things.append(x)
Worth = []
for y in products.values():
Worth.append(y)
Highest = max(Worth)
Counter = 0
Length = len(Things)
while (Counter < Length):
Item = Things[Counter]
Money = Worth[Counter]
if (Money == Highest):
return Item
else:
Counter += 1
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
740752810a2183122e52edfcad677fbd485bf6f7 | 17af1d72936b6f5340abcea54535db44c0b6cc20 | /TEBC-Net/relative_Transformer.py | 44ec18f762af20a6808f3e952cffa54bb4e976c0 | [] | no_license | azhe1234/TEBC-Net | 4961d0a91d3a0a8fd99d80e356b3a12902b65c40 | b1e2321db5901613a71304607583808eb1ef24a3 | refs/heads/master | 2023-04-10T03:17:18.670955 | 2021-03-04T01:18:23 | 2021-03-04T01:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,016 | py | import torch
from torch import nn
import torch.nn.functional as F
import math
class RelativeEmbedding(nn.Module):
def forward(self, input):
"""Input is expected to be of size [bsz x seqlen].
"""
bsz, seq_len = input.size()
max_pos = self.padding_idx + seq_len
if max_pos > self.origin_shift:
# recompute/expand embeddings if needed
weights = self.get_embedding(
max_pos * 2,
self.embedding_dim,
self.padding_idx,
)
weights = weights.to(self._float_tensor)
del self.weights
self.origin_shift = weights.size(0) // 2
self.register_buffer('weights', weights)
positions = torch.arange(-seq_len, seq_len).to(input.device).long() + self.origin_shift # 2*seq_len
embed = self.weights.index_select(0, positions.long()).detach()
return embed
class RelativeSinusoidalPositionalEmbedding(RelativeEmbedding):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1568):
"""
:param embedding_dim: 每个位置的dimension
:param padding_idx:
:param init_size:
"""
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
assert init_size % 2 == 0
weights = self.get_embedding(
init_size + 1,
embedding_dim,
padding_idx,
)
self.register_buffer('weights', weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(-num_embeddings // 2, num_embeddings // 2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
self.origin_shift = num_embeddings // 2 + 1
return emb
class RelativeMultiHeadAttn(nn.Module):
def __init__(self, d_model, n_head, dropout, r_w_bias=None, r_r_bias=None, scale=False):
"""
:param int d_model:
:param int n_head:
:param dropout: 对attention map的dropout
:param r_w_bias: n_head x head_dim or None, 如果为dim
:param r_r_bias: n_head x head_dim or None,
:param scale:
:param rel_pos_embed:
"""
super().__init__()
self.qkv_linear = nn.Linear(d_model, d_model * 3, bias=False)
self.n_head = n_head
self.head_dim = d_model // n_head
self.dropout_layer = nn.Dropout(dropout)
self.pos_embed = RelativeSinusoidalPositionalEmbedding(d_model // n_head, 0, 1200)
if scale:
self.scale = math.sqrt(d_model // n_head)
else:
self.scale = 1
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head)))
self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(n_head, d_model // n_head)))
else:
self.r_r_bias = r_r_bias # r_r_bias就是v
self.r_w_bias = r_w_bias # r_w_bias就是u
def forward(self, x, mask):
"""
:param x: batch_size x max_len x d_model
:param mask: batch_size x max_len
:return:
"""
batch_size, max_len, d_model = x.size()
pos_embed = self.pos_embed(mask) # l x head_dim
qkv = self.qkv_linear(x) # batch_size x max_len x d_model3
q, k, v = torch.chunk(qkv, chunks=3, dim=-1)
q = q.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
k = k.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
v = v.view(batch_size, max_len, self.n_head, -1).transpose(1, 2) # b x n x l x d
rw_head_q = q + self.r_r_bias[:, None]
AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q, k]) # b x n x l x d, n是head
D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None] # head x 2max_len, 每个head对位置的bias
B_ = torch.einsum('bnqd,ld->bnql', q, pos_embed) # bsz x head x max_len x 2max_len,每个query对每个shift的偏移
E_ = torch.einsum('bnqd,ld->bnql', k, pos_embed) # bsz x head x max_len x 2max_len, key对relative的bias
BD = B_ + D_ # bsz x head x max_len x 2max_len, 要转换为bsz x head x max_len x max_len
BDE = self._shift(BD) + self._transpose_shift(E_)
attn = AC + BDE
attn = attn / self.scale
attn = attn.masked_fill(mask[:, None, None, :].eq(0), float('-inf'))
attn = F.softmax(attn, dim=-1)
attn = self.dropout_layer(attn)
v = torch.matmul(attn, v).transpose(1, 2).reshape(batch_size, max_len, d_model) # b x n x l x d
return v
def _shift(self, BD):
"""
类似
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
-3 -2 -1 0 1 2
转换为
0 1 2
-1 0 1
-2 -1 0
:param BD: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = BD.size()
zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len) # bsz x n_head x (2max_len+1) x max_len
BD = BD[:, :, :-1].view(bsz, n_head, max_len, -1) # bsz x n_head x 2max_len x max_len
BD = BD[:, :, :, max_len:]
return BD
def _transpose_shift(self, E):
"""
类似
-3 -2 -1 0 1 2
-30 -20 -10 00 10 20
-300 -200 -100 000 100 200
转换为
0 -10 -200
1 00 -100
2 10 000
:param E: batch_size x n_head x max_len x 2max_len
:return: batch_size x n_head x max_len x max_len
"""
bsz, n_head, max_len, _ = E.size()
zero_pad = E.new_zeros(bsz, n_head, max_len, 1)
# bsz x n_head x -1 x (max_len+1)
E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
indice = (torch.arange(max_len) * 2 + 1).to(E.device)
E = E.index_select(index=indice, dim=-2).transpose(-1, -2) # bsz x n_head x max_len x max_len
return E | [
"120192227066@ncepu.edu.cn"
] | 120192227066@ncepu.edu.cn |
9f8a9aa777f4e0e856d4eb0612c94910818c0e3f | 203985cab249cfca2b93f66c1d8486e1574348b8 | /Applications/Applications/dock/reframe_scripts/dock_profilef_test2_nv.py | dd3e34c37e876eba6d652d1448d647ba7d8d0a53 | [] | no_license | arm-hpc-user-group/Cloud-HPC-Hackathon-2021 | 873078d634bba26088199fa0f6be36d87017b0f6 | afdd42e0f36afa0e0c73757beb6e2c975d9ca57b | refs/heads/main | 2023-04-10T00:58:37.212005 | 2022-05-20T18:49:20 | 2022-05-20T18:49:20 | 374,724,420 | 30 | 71 | null | 2021-09-10T10:27:06 | 2021-06-07T16:00:59 | APL | UTF-8 | Python | false | false | 4,187 | py | import reframe as rfm
import reframe.utility.sanity as sn
import reframe.core.launchers as rcl
import hackathon as hack
@rfm.simple_test
class DockfpTest2NV(hack.HackathonBase): ###################################################
# Where to run the binaries 'aws:c6gn' on Arm or 'aws:c5n' on Intel
valid_systems = ['aws:c6gn']
# valid_prog_environs = ['*']
# Logging Variables
log_team_name = 'Wolfpack'
log_app_name = 'dock'
log_test_name = 'dock_test2' ############################################
# Define Execution
# Binary to run
executable = 'dock6.mpi'
input_file = 'mpi1.in' #################################################
# Command line options to pass
executable_opts = ['-i', '/scratch/dock6/tutorials/mpi_demo/4_dock/'+input_file, '-o', 'mpi.out']#, '> miniGMG.out'] ####################################
# Where the output is written to
# logfile = 'miniGMG.out'
# # Store the output file (used for validation later)
# keep_files = [logfile]
# Parameters - Compilers - Defined as their Spack specs (use spec or hash)
spec = parameter([ #####################################################################
# 'dock@6.9%gcc@10.3.0',
# 'dock@6.9%arm@21.0.0.879',
'dock@6.9%nvhpc@21.2',
])
# Parameters - MPI / Threads - Used for scaling studies
parallelism = parameter([ #################################################################
{ 'nodes' : 1, 'mpi' : 64, 'omp' : 1},
])
@run_before('run')
def prepare_job(self):
# self.job.launcher = rcl.LauncherWrapper(self.job.launcher, 'time -p')
self.job.launcher.options = ['--exclusive']
@run_before('run')
def set_profiler(self):
self.proffile = 'sprofile1.map'
self.keep_files = [self.proffile]
# self.modules.append('arm-forge@21.0')
self.job.launcher = rcl.LauncherWrapper(self.job.launcher, 'map', ['--profile', '--output='+self.proffile])
# @run_before('run')
# def perf_libs_tools(self):
# apl_dir = 'plt_out'
# self.prerun_cmds.append('mkdir {0}'.format(apl_dir))
# self.variables['ARMPL_SUMMARY_FILEROOT'] = '$PWD/{0}/'.format(apl_dir)
# self.keep_files = [apl_dir]
# self.job.launcher.options = ['--export=ALL,LD_PRELOAD=libarmpl_mp-summarylog.so']
# apl_file = '{0}_{1}_apl_summary.log'.format(self.log_app_name, self.log_test_name)
# self.postrun_cmds.append('process_summary.py {0}/*.apl > {1}'.format(apl_dir, apl_file))
# self.keep_files.append(apl_file)
# /scratch/opt/spack/linux-amzn2-graviton2/gcc-10.3.0/perf-libs-tools-git-master-ja4rifwkuuj4nw2c7usikdlomimo6yxm/lib/
# @sn.sanity_function
# def get_time_in_sec_list(self, min_list, sec_list):
# return list(x * 60 + y
# for x, y in sn.zip(min_list, sec_list))
@run_before('sanity')
def set_sanity_patterns(self):
if self.input_file != 'mpi1.in':
valid_regex = r'Molecule:\s+ZINC00001555\n\n\s+Anchors:\s+(\S+)\n\s+Orientations:\s+(\S+)\n\s+Conformations:\s+(\S+)\n\n\s+Grid_Score:\s+(\S+)'
Grid_Score = sn.extractsingle(valid_regex, 'mpi.out', 4, float)
expected_lower = -25.860000
expected_upper = -25.830000
self.sanity_patterns = sn.assert_bounded(Grid_Score, expected_lower, expected_upper)
else:
valid_regex = r'grid box quantifiers.\n\n-+\nMolecule:\s+1VRT.lig\n\n\s+Anchors:\s+(\S+)\n\s+Orientations:\s+(\S+)\n\s+Conformations:\s+(\S+)\n\n\s+Grid_Score:\s+(\S+)'
Grid_Score = sn.extractsingle(valid_regex, 'mpi.out', 4, float)
expected_lower = -60.900000
expected_upper = -60.870000
self.sanity_patterns = sn.assert_bounded(Grid_Score, expected_lower, expected_upper)
# self.reference = {
# '*': {'Total Time': (0, None, None, 's'),}
# }
# # perf_regex_real = r'real\s+(\S+)'
# # real = sn.extractsingle(perf_regex_real, self.stderr, 1, float)
# self.perf_patterns = {
# # 'Total Time': real,
# }
| [
"noreply@github.com"
] | noreply@github.com |
62c8e000ff730bcbea4570291d047b650df3c345 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_44485.py | ad2e839330e7dfc437fe5ef7dea9d16f2ba0db61 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,844 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((448.215, 373.671, 502.62), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((464.79, 386.681, 568.915), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((478.084, 395.023, 648.801), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((402.568, 287.673, 599.847), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((530.933, 451.66, 826.114), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((451.427, 387.613, 546.741), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((450.962, 387.769, 545.766), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((446.504, 405.272, 524.516), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((421.767, 415.544, 533.154), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((398.971, 403.391, 544.622), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((383.678, 413.102, 523.117), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((368.115, 429.272, 540.32), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((463.815, 385.186, 521.177), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((277.179, 480.543, 560.233), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((389.144, 515.459, 724.846), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((389.144, 515.459, 724.846), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((400.019, 499.856, 704.16), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((407.894, 480.448, 685.382), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((419.822, 481.174, 659.925), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((430.114, 457.122, 648.593), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((443.95, 449.348, 625.252), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((455.987, 439.318, 601.527), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((233.64, 508.462, 710.824), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((666.939, 349.889, 475.745), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((485.233, 433.619, 621.765), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((485.233, 433.619, 621.765), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((464.027, 413.439, 623.683), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((458.301, 384.807, 624.985), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((465.303, 363.728, 643.478), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((444.821, 321.096, 527.813), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((488.001, 400.434, 760.614), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((456.986, 360.146, 562.353), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((456.92, 359.898, 562.347), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((428.883, 360.578, 559.028), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((414.279, 382.726, 568.568), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((422.678, 404.638, 583.926), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((426.82, 428.293, 569.297), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((408.78, 442.883, 553.56), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((391.342, 452.132, 533.736), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((475.974, 457.917, 546.513), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((305.133, 443.399, 522.576), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((509.721, 430.9, 562.748), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((501.996, 410.394, 574.058), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((477.358, 366.557, 603.465), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((452.066, 323.91, 634.045), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((472.756, 281.136, 569.75), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((401.913, 298.465, 723.226), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((451.785, 441.396, 544.942), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((461.925, 430.852, 569.157), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((473.459, 413.87, 589.291), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((493.392, 401.809, 606.299), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((509.848, 393.316, 628.565), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((524.773, 378.984, 649.202), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((492.816, 385.418, 576.442), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((559.742, 368.967, 723.785), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
2080c9590d8f599c2b46a7b06a7375201ebc5293 | e656912941a35ff51afd056c406ede34a2dbfa33 | /venv/Scripts/mavgpslock.py | 519f545f580df7f6f7e1dec5691a6f6fa1013178 | [] | no_license | madboi01/GCS-test1 | 85f85bd78c67f88398753694f14156efa36c1e1d | 53c2a5ae07dfa5c63ce6fa5c7c0d29a7ea4840f5 | refs/heads/master | 2023-08-04T14:07:20.208312 | 2021-09-23T13:02:24 | 2021-09-23T13:02:24 | 409,589,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | #!c:\users\dell\pycharmprojects\map\venv\scripts\python.exe
'''
show GPS lock events in a MAVLink log
'''
from __future__ import print_function
import time
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="condition for packets")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def lock_time(logfile):
    '''Work out the total GPS lock time (seconds) for one MAVLink log.

    Scans GPS_RAW_INT/GPS_RAW messages, printing every lock and unlock
    event, and returns the cumulative seconds spent with a 2D/3D fix
    (fix_type >= 2).  Returns 0 if the log contains no GPS messages.
    '''
    # BUG FIX: the original read the module-level ``filename`` instead of
    # the ``logfile`` parameter; it only worked because the caller's loop
    # variable happened to be named ``filename``.  Use the parameter.
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile)
    locked = False        # currently holding a fix?
    start_time = 0.0      # wall-clock time the current lock began
    total_time = 0.0      # accumulated locked seconds
    t = None
    m = mlog.recv_match(type=['GPS_RAW_INT', 'GPS_RAW'], condition=args.condition)
    if m is None:
        # no GPS messages in this log at all
        return 0
    unlock_time = time.mktime(time.localtime(m._timestamp))
    while True:
        m = mlog.recv_match(type=['GPS_RAW_INT', 'GPS_RAW'], condition=args.condition)
        if m is None:
            # end of log: close out a lock still in progress
            if locked:
                total_time += time.mktime(t) - start_time
            if total_time > 0:
                print("Lock time : %u:%02u" % (int(total_time) // 60, int(total_time) % 60))
            return total_time
        t = time.localtime(m._timestamp)
        if m.fix_type >= 2 and not locked:
            print("Locked at %s after %u seconds" % (time.asctime(t),
                                                     time.mktime(t) - unlock_time))
            locked = True
            start_time = time.mktime(t)
        elif m.fix_type == 1 and locked:
            # fix lost but the receiver is still talking to us
            print("Lost GPS lock at %s" % time.asctime(t))
            locked = False
            total_time += time.mktime(t) - start_time
            unlock_time = time.mktime(t)
        elif m.fix_type == 0 and locked:
            # receiver stopped reporting a usable protocol state
            print("Lost protocol lock at %s" % time.asctime(t))
            locked = False
            total_time += time.mktime(t) - start_time
            unlock_time = time.mktime(t)
# Sum the locked time over every log given on the command line and
# report the grand total as minutes:seconds.
total = 0.0
for filename in args.logs:
    total += lock_time(filename)
print("Total time locked: %u:%02u" % (int(total)//60, int(total)%60))
| [
"66716237+madboi01@users.noreply.github.com"
] | 66716237+madboi01@users.noreply.github.com |
b3b06deaa3d53ad7fecd9e726b71f1a4e7ccb8ca | 33b2a4079a1c00e9b20d8dd87abb98ab75b63618 | /posts/tests/test_urls.py | 45d78cbde2354fb0f8063ced0ffc46faa3c3d1a6 | [] | no_license | personage-hub/yatube | f975040cfbb298daf736d66b09753d545f7e7213 | 5cac340a0ae671392b2558416f57034c620bea35 | refs/heads/master | 2023-08-14T23:50:01.520456 | 2021-09-30T20:37:27 | 2021-09-30T20:37:27 | 412,219,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,574 | py | import random
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from posts.models import Comment, Group, Post
URL_HOMEPAGE = '/'
URL_GROUP_PAGE = '/group/test_slug/'
URL_NEW_POST = '/new/'
URL_USER_PROFILE = '/test_user/'
URL_POST = '/test_user/1/'
URL_POST_EDIT = '/test_user/1/edit/'
URL_LOGIN = '/auth/login/?next='
URL_NON_EXIST = 'url_non_exist' + str(random.randint(0, 10000))
User = get_user_model()
COMMENT_TEXT = 'Тестовый комментарий {index}'
COMMENT_TEST_LEN = 10
test_user_author = {
'username': 'test_user',
'email': 'test@ttesst.ru',
'password': 'Test2password'
}
test_user_not_author = {
'username': 'random_user',
'email': 'test2@ttesst.ru',
'password': 'Test2password'
}
test_group = {
'title': 'Тестовая группа ' * 5,
'slug': 'test_slug'
}
class StaticURLTests(TestCase):
    """Smoke test: static pages must be reachable without logging in."""

    def setUp(self):
        # Anonymous client — no credentials attached.
        self.guest_client = Client()

    def test_homepage(self):
        """The home page answers HTTP 200 for an anonymous visitor."""
        status = self.guest_client.get(URL_HOMEPAGE).status_code
        self.assertEqual(status, HTTPStatus.OK)
class URLTest(TestCase):
    """URL availability, redirect and template checks for the posts app."""

    @classmethod
    def setUpClass(cls):
        """Create shared fixtures: an author with one post, a second
        (non-author) user, a group, and a batch of comments on the post."""
        super().setUpClass()
        cls.author = User.objects.create_user(**test_user_author)
        cls.post_1 = Post.objects.create(
            text='Тестовый №1 с длинным텍 текстом',
            author=cls.author
        )
        cls.non_author = User.objects.create_user(**test_user_not_author)
        cls.group = Group.objects.create(**test_group)
        # COMMENT_TEST_LEN - 1 comments (range starts at 1)
        comments = [
            Comment(
                post=URLTest.post_1,
                author=URLTest.author,
                text=COMMENT_TEXT.format(index=i),
            )
            for i in range(1, COMMENT_TEST_LEN)
        ]
        Comment.objects.bulk_create(comments)

    def setUp(self):
        # Three clients: anonymous, the post's author, and another
        # logged-in user who is not the author.
        self.guest_client = Client()
        self.author = URLTest.author
        self.non_author = URLTest.non_author
        self.authorized_client_author = Client()
        self.authorized_client_author.force_login(self.author)
        self.authorized_client_non_author = Client()
        self.authorized_client_non_author.force_login(self.non_author)

    def test_url_url_exists_at_desired_location_authorized(self):
        """Pages are viewable by an authorized user."""
        urls = [
            URL_HOMEPAGE,
            URL_GROUP_PAGE,
            URL_NEW_POST,
            URL_USER_PROFILE,
            URL_POST,
            URL_POST_EDIT
        ]
        for url in urls:
            with self.subTest(url=url):
                response = self.authorized_client_author.get(url)
                self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_url_url_exists_at_desired_location_anon(self):
        """Pages are viewable by an anonymous user."""
        urls = [
            URL_HOMEPAGE,
            URL_GROUP_PAGE,
            URL_USER_PROFILE,
            URL_POST
        ]
        for url in urls:
            with self.subTest(url=url):
                response = self.guest_client.get(url)
                self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_new_edit_post_redirects_anon_on_login(self):
        """The post-creation and post-edit pages redirect an anonymous
        user to the login page."""
        redirect_pages = {
            URL_NEW_POST: (URL_LOGIN + URL_NEW_POST),
            URL_POST_EDIT: (URL_LOGIN + URL_POST_EDIT)
        }
        for url, redirect_page in redirect_pages.items():
            with self.subTest(redirect_page=redirect_page):
                response = self.guest_client.get(url, follow=True)
                self.assertRedirects(response, redirect_page)

    def test_edit_post_redirect_non_author_on_post_page_edit(self):
        """The post-edit page redirects a user who is not the author
        to the post view page."""
        response = self.authorized_client_non_author.get(
            URL_POST_EDIT, follow=True
        )
        self.assertRedirects(response, URL_POST)

    def test_urls_uses_correct_template(self):
        """Each URL renders the expected template."""
        templates_url_names = {
            URL_HOMEPAGE: 'posts/index.html',
            URL_GROUP_PAGE: 'posts/group.html',
            URL_NEW_POST: 'posts/edit_post.html',
            URL_POST_EDIT: 'posts/edit_post.html',
            URL_NON_EXIST: 'misc/404.html',
            URL_POST: 'posts/includes/comments.html'
        }
        for url, template in templates_url_names.items():
            with self.subTest(url=url):
                response = self.authorized_client_author.get(url)
                self.assertTemplateUsed(response, template)

    def test_page_not_found_get_404(self):
        """A request for a missing page returns HTTP 404."""
        urls = [
            URL_NON_EXIST,
        ]
        for url in urls:
            with self.subTest(url=url):
                response = self.guest_client.get(url)
                self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
| [
"alexander.niyazov@yandex.ru"
] | alexander.niyazov@yandex.ru |
c73b2322cd23ac5ea3b5a4646958132b8c3e2e6f | 342364ca8ad9bd0b0fd6629eca55425ef27a069e | /run_dbCheck.py | 98f8fe7f22563f16fdd2d85666ba415e9154417b | [] | no_license | kingking888/spiderIP | a49f813cb40651b4c4a8ce39699b62f2a431fc9d | d171ce4ba4502e3c5089353b3c2b1cf56264aabf | refs/heads/master | 2020-07-02T17:19:20.766704 | 2019-08-10T07:44:50 | 2019-08-10T07:44:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = '80022068'
__mtime__ = '2019/8/10'
# qq:2456056533
佛祖保佑 永无bug!
"""
from spiderIP.dbIPCheck import DbIPCheck
# Entry point: build the checker and purge stale/invalid proxy IPs from
# the database.  (del_ip's behaviour is defined in spiderIP.dbIPCheck,
# not visible here.)
if __name__ == '__main__':
    db_ipcheck = DbIPCheck()
    db_ipcheck.del_ip()
| [
"2456056533@qq.com"
] | 2456056533@qq.com |
906cd4d8ad7c433c507a091c53dfd90fe4514f34 | 7f53a1ba1920a5301ca325d4faf480f3799c0a48 | /merger_2012_emb.py | 654e169fbd8f372fa53edddcf0d02d83b14ee90c | [] | no_license | rmanzoni/tools | a7fe8083628954f7f02e80add1d3dd761720e8e6 | e2189860d26be2a4276ec2ca3fe220e90adf9158 | refs/heads/master | 2021-01-01T18:37:33.731578 | 2015-04-15T13:46:12 | 2015-04-15T13:46:12 | 18,681,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | import os
import ROOT
from ROOT import gROOT, gStyle, TFile, gDirectory
gROOT.SetBatch(True)
#for mass in [110,115,120,125,130,135,140,145] :
for mass in [125] :
print "Higgs mass =", str(mass)
# search in current dir
matches = []
dirList = os.listdir(os.getcwd())
for fname in dirList:
if str(fname).find('mH'+str(mass)) > 0 and str(fname).find('for_smoothing_') < 0 :
if ( str(fname).find("BOOSTED") > 0 or str(fname).find("VBF") > 0 ) :
matches.append(fname)
for t in ["VBF","BOOSTED"] :
Files = []
for m in matches :
if str(m).find(t) > 0 :
if str(m).find("svfitMass.root") > 0 :
noShift = TFile.Open(m,'read')
Files.append(noShift)
elif str(m).find("svfitMass*1.03.root") > 0 :
upShift = TFile.Open(m,'read')
Files.append(upShift)
elif str(m).find("svfitMass*0.97.root") > 0 :
doShift = TFile.Open(m,'read')
Files.append(doShift)
elif str(m).find("svfitMass*1.06.root") > 0 :
upShiftem = TFile.Open(m,'read')
Files.append(upShiftem)
if t == "VBF" :
cat = "SM2"
elif t == "BOOSTED" :
cat = "SM1"
print 'category: ',t, cat
folderName = "LimitInputs"
folderList = os.listdir(os.getcwd())
found = False
for f1 in folderList :
if str(f1) == folderName :
found = True
if found == False :
os.mkdir(folderName)
if str(m).find(t) < 0 : continue
Shifted = TFile.Open(str(folderName+"/tauTau_2012_"+cat+"_mH"+str(mass)+".root"),'recreate')
Shifted.mkdir(str("tauTau_2012_"+cat))
for h in Files :
print 'File name: ',h.GetName()
h.cd(str("tauTau_"+cat))
dirList = gDirectory.GetListOfKeys()
for k1 in dirList :
histo = k1.ReadObj()
Shifted.cd(str("tauTau_2012_"+cat))
histo.Write()
for j in Files :
j.Close()
Shifted.Close()
print '+++++++++++'
print '+ end job +'
print '+++++++++++'
# import fnmatch
# search through dir and subdirs
# matches = []
# for root, dirnames, filenames in os.walk(os.getcwd()):
# for filename in fnmatch.filter(filenames, '*VBF*'):
# matches.append(os.path.join(root, filename))
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
d37d447bd7ce2b1d813f28d559dadf00e8be9f92 | df25eefe4878c08b0f51f6ff19f48054ba6dbc2a | /test/espnet2/text/test_text_converter.py | 0d1f32b94924e5d24b9c95b25a217e07483a5f3e | [
"Apache-2.0"
] | permissive | sas91/espnet | 7f14a9394469993fb948758c7b0b78f76ad12cbe | 8e263d6512eb84cebeaecf6b99204c102a8252b5 | refs/heads/master | 2021-07-13T18:45:13.981483 | 2020-06-02T08:43:25 | 2020-06-02T08:43:25 | 142,748,209 | 1 | 0 | Apache-2.0 | 2018-07-29T09:37:35 | 2018-07-29T09:37:35 | null | UTF-8 | Python | false | false | 2,565 | py | from pathlib import Path
import string
import pytest
import sentencepiece as spm
from espnet2.text.char_tokenizer import CharTokenizer
from espnet2.text.sentencepiece_tokenizer import SentencepiecesTokenizer
from espnet2.text.word_tokenizer import WordTokenizer
@pytest.fixture(params=[None, " "])
def word_converter(request):
return WordTokenizer(delimiter=request.param)
@pytest.fixture
def char_converter():
return CharTokenizer(["[foo]"])
@pytest.fixture
def spm_srcs(tmp_path: Path):
    """Train a throwaway SentencePiece model on the ASCII letters.

    Returns:
        (model, vocabs): path to the trained ``.model`` file and the set
        of token strings seen when re-tokenizing the training text, plus
        the special tokens "<unk>" and the SentencePiece meta symbol "▁".
    """
    input_text = tmp_path / "text"
    # letters plus room for the special tokens SentencePiece always adds
    vocabsize = len(string.ascii_letters) + 4
    model_prefix = tmp_path / "model"
    model = str(model_prefix) + ".model"
    input_sentence_size = 100000
    with input_text.open("w") as f:
        f.write(string.ascii_letters + "\n")
    spm.SentencePieceTrainer.Train(
        f"--input={input_text} "
        f"--vocab_size={vocabsize} "
        f"--model_prefix={model_prefix} "
        f"--input_sentence_size={input_sentence_size}"
    )
    sp = spm.SentencePieceProcessor()
    sp.load(model)
    with input_text.open("r") as f:
        vocabs = {"<unk>", "▁"}
        for line in f:
            # NOTE(review): DecodePieces on the raw character list appears
            # to re-tokenize the line into model pieces -- confirm intent.
            tokens = sp.DecodePieces(list(line.strip()))
            vocabs |= set(tokens)
    return model, vocabs
@pytest.fixture
def spm_converter(tmp_path, spm_srcs):
model, vocabs = spm_srcs
sp = spm.SentencePieceProcessor()
sp.load(model)
token_list = tmp_path / "token.list"
with token_list.open("w") as f:
for v in vocabs:
f.write(f"{v}\n")
return SentencepiecesTokenizer(model=model)
def test_Text2Sentencepieces_repr(spm_converter: SentencepiecesTokenizer):
print(spm_converter)
def test_Text2Sentencepieces_text2tokens(spm_converter: SentencepiecesTokenizer):
assert spm_converter.tokens2text(spm_converter.text2tokens("Hello")) == "Hello"
def test_Text2Words_repr(word_converter: WordTokenizer):
print(word_converter)
def test_Text2Words_text2tokens(word_converter: WordTokenizer):
assert word_converter.text2tokens("Hello World!! Ummm") == [
"Hello",
"World!!",
"Ummm",
]
def test_Text2Words_tokens2text(word_converter: WordTokenizer):
assert word_converter.tokens2text("Hello World!!".split()) == "Hello World!!"
def test_Text2Chars_repr(char_converter: CharTokenizer):
print(char_converter)
def test_Text2Chars_text2tokens(char_converter: CharTokenizer):
assert char_converter.text2tokens("He[foo]llo") == [
"H",
"e",
"[foo]",
"l",
"l",
"o",
]
| [
"naoyuki.kamo829@gmail.com"
] | naoyuki.kamo829@gmail.com |
77032a87267e6f84a839e1be407d819897f193f7 | 96cd5b21d06c89ec6a3c927dcf410554ad49c720 | /svn_monitor/svn_monitor.py | f4368337ec7825af2923f9ca99adc797ab9f168b | [] | no_license | yongzhizhan/toys | 278d5c1eb977a76baa35c02cef0c620e8553e80e | e720f54dd6e4f7646cf18089c6fe90f047eae520 | refs/heads/master | 2021-01-23T08:03:47.272990 | 2017-01-31T16:56:39 | 2017-01-31T16:56:39 | 80,527,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,290 | py | #!/usr/bin/env python
# coding:utf-8
import threading
from time import sleep
from database import DataBase
from mail import Mail
from svn_model import SvnModel
from models import SvnInfo
from models import MailInfo
import config
class SvnMonitor:
    """Poll a set of SVN repositories and e-mail subscribers on new
    revisions.

    Repository paths and subscriber addresses come from the local sqlite
    database (SvnInfo / MailInfo rows); credentials and SMTP settings
    come from the ``config`` module.  Python 2 code (``print e``).
    """

    timer = None        # threading.Timer driving the poll loop
    session = None      # scoped DB session (see database.get_session)
    stop_work = False   # set True to make run() exit its loop
    intervalS = 3       # default seconds between polls

    def __init__(self, intervalS):
        """Open the database and prepare (but do not start) the timer.

        Args:
            intervalS: seconds to sleep between polling rounds.
        """
        self.intervalS = intervalS
        database = DataBase("sqlite:///./data.db")
        database.init_db()
        self.session = database.get_session()
        # delay 0: run() starts polling as soon as start() is called
        self.timer = threading.Timer(0, self.run)

    def __del__(self):
        # release the scoped session on garbage collection
        self.session.remove()

    def start(self):
        """Start the polling thread."""
        self.timer.start()
        return

    def stop(self):
        """Ask the poll loop to stop and cancel a pending timer."""
        self.stop_work = True
        self.timer.cancel()

    def run(self):
        """Poll loop body: check repositories, then sleep, until stopped."""
        while not self.stop_work:
            self._run()
            sleep(self.intervalS)

    def _run(self):
        """One polling round over all SvnInfo rows.

        For each repository whose current revision differs from the
        stored one: notify every subscribed address, then persist the
        new revision.  Any exception rolls the transaction back.
        """
        try:
            svn_infos = self.session.query(SvnInfo).all()
            for svn_info in svn_infos:
                path = svn_info.path
                svn_model = SvnModel(path, config.svn_user, config.svn_pwd)
                cur_revision = int(svn_model.get_revision())
                if svn_info.last_revision == cur_revision:
                    continue
                mail_infos = self.session.query(MailInfo).filter(MailInfo.svn_id == svn_info.id).all()
                for mail_info in mail_infos:
                    self.notify(svn_info, mail_info.mail_address, cur_revision)
                # persist the newly observed revision
                self.session.query(SvnInfo).filter(SvnInfo.id == svn_info.id).update({SvnInfo.last_revision: cur_revision})
                self.session.commit()
        except Exception as e:
            self.session.rollback()
            print e
        return

    def notify(self, svn_info, mail_address, cur_revision):
        """Send one update e-mail for ``svn_info`` to ``mail_address``."""
        print "notify revision is " + str(cur_revision)
        mail = Mail(config.email_smtp, "svn monitor", "to " + mail_address, "new revision: " + str(cur_revision), svn_info.path + " updated current revision is: " + str(cur_revision))
        if config.email_use_ssl:
            mail.starttls()
        mail.login(config.email_user, config.email_pwd)
        mail.send(config.email_user, mail_address)
        return
| [
"yongzhi.zhan@gmail.com"
] | yongzhi.zhan@gmail.com |
561fbf76952e72959088ff99ae838295f3938bc7 | 479d3414e914f144fff20ee71872472ac84ca410 | /codespace/python/telegram/_files/inputfile.py | 730301869bd5e67593a4565ada2e146058b8f953 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | tzpBingo/github-trending | 0fa4e0e08743f0683f68fd54d74eec466bc525e0 | 505014e84bdea7e2732296821028df20c0305390 | refs/heads/master | 2023-07-24T13:29:47.393940 | 2023-07-19T09:39:29 | 2023-07-19T09:39:29 | 102,687,887 | 49 | 20 | MIT | 2023-05-22T21:33:53 | 2017-09-07T03:39:42 | Python | UTF-8 | Python | false | false | 4,191 | py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram InputFile."""
import mimetypes
from typing import IO, Optional, Union
from uuid import uuid4
from telegram._utils.files import load_file
from telegram._utils.types import FieldTuple
_DEFAULT_MIME_TYPE = "application/octet-stream"


class InputFile:
    """A file payload prepared for upload to the Telegram servers.

    The raw content is normalized to :obj:`bytes`: byte input is kept
    as-is, string input is UTF-8 encoded, and file objects are read via
    ``load_file`` (which may also report a filename).  The mime type is
    guessed from :paramref:`filename` when one is available, falling back
    to ``application/octet-stream``; when no filename is given, one is
    derived from the mime type (``/`` replaced by ``.``).

    Args:
        obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file
            descriptor or the file's content as bytes or string.
        filename (:obj:`str`, optional): Filename for this InputFile.
        attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter
            this file belongs to should point to the multipart data via an
            ``attach://`` URI.  Defaults to :obj:`False`.

    Attributes:
        input_file_content (:obj:`bytes`): The binary content to send.
        attach_name (:obj:`str`): Optional. Name used to build the
            ``attach://<attach_name>`` URI, or :obj:`None`.
        filename (:obj:`str`): Filename for the file to be sent.
        mimetype (:obj:`str`): The mime type inferred for the file.
    """

    __slots__ = ("filename", "attach_name", "input_file_content", "mimetype")

    def __init__(
        self,
        obj: Union[IO[bytes], bytes, str],
        filename: Optional[str] = None,
        attach: bool = False,
    ):
        # Normalize the payload to bytes, possibly learning a filename
        # from the file object itself.
        if isinstance(obj, bytes):
            contents = obj
        elif isinstance(obj, str):
            contents = obj.encode("utf-8")
        else:
            reported_filename, contents = load_file(obj)
            filename = filename or reported_filename
        self.input_file_content: bytes = contents

        # A unique attach name is only needed for attach:// style uploads.
        self.attach_name: Optional[str] = "attached" + uuid4().hex if attach else None

        guessed = mimetypes.guess_type(filename, strict=False)[0] if filename else None
        self.mimetype: str = guessed or _DEFAULT_MIME_TYPE
        self.filename: str = filename or self.mimetype.replace("/", ".")

    @property
    def field_tuple(self) -> FieldTuple:
        """Field tuple representing the file for upload to Telegram.

        Returns:
            Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]: filename,
            content, mime type.
        """
        return self.filename, self.input_file_content, self.mimetype

    @property
    def attach_uri(self) -> Optional[str]:
        """URI to insert into the JSON data for uploading the file, or
        :obj:`None` when :attr:`attach_name` is :obj:`None`."""
        if self.attach_name is None:
            return None
        return f"attach://{self.attach_name}"
| [
"tzpbingo@gmail.com"
] | tzpbingo@gmail.com |
e44aceb67241c06f62b74d6bfc67352c185a1223 | 5f8e8bbe15e36d6d25e80c726b847c686699a279 | /get_yes_or_no.py | ee42d878e16c1bbfa80c553e3649b6e8f50c44af | [] | no_license | gkl1107/Python-algorithm | e55443cc6fe6159c1557f0041b2241d494ac5198 | 06401a93c2675706f3899ec81d1fc563be9d499f | refs/heads/master | 2022-04-13T18:47:59.014878 | 2020-04-04T02:17:25 | 2020-04-04T02:17:25 | 142,956,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | #!/usr/bin/python
def get_yes_or_no(message, input_func=None):
    """Prompt until the user answers yes or no.

    Args:
        message: prompt string shown to the user.
        input_func: optional callable taking the prompt and returning a
            line of input.  Defaults to the interpreter's line-input
            builtin (``raw_input`` on Python 2, ``input`` on Python 3 --
            the original only worked on Python 2).  Exposing it also lets
            the loop be driven programmatically, e.g. in tests.

    Returns:
        'Y' or 'N' (always upper case).
    """
    if input_func is None:
        # Python 2 reads lines with raw_input(); Python 3 renamed it input().
        try:
            input_func = raw_input
        except NameError:
            input_func = input
    while True:
        answer = input_func(message).upper()  # convert to upper case
        if answer in ('Y', 'N'):
            return answer
        print('Please enter Y for yes or N for no.')
# Simple interactive demo: ask about lima beans and react to the answer.
response = get_yes_or_no('Do you like lima beans? Y)es or N)o: ')
if response == 'Y':
    print('Great! They are very healthy.')
else:
    print('Too bad. If cooked right, they are quite tasty.')
| [
"kailuguan@Kailu-Air.localdomain"
] | kailuguan@Kailu-Air.localdomain |
684245438a626b9eaaf31134baeaa484f6bfaadc | db91297805de68086054016e643eceea7cf7c27b | /dnn.py | f45fcedac13fcb62eccf2c01b3bdf5e1db8b0650 | [] | no_license | citisy/BasicNeuralNetwork | 85349c919e4cb51465791df96a35fa8f3083d0a5 | a2c7def05f7bb7b886579de93948293436c99499 | refs/heads/master | 2022-05-02T22:01:32.068522 | 2022-03-28T15:39:36 | 2022-03-28T15:39:36 | 211,262,515 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import seaborn as sns
from matplotlib.animation import FFMpegWriter as Writer
sns.set(style="white", palette="muted", color_codes=True)
def get_dataset():
    """Build a noisy quadratic toy regression set.

    Returns:
        (data, label): two float arrays of shape [100, 1], where label is
        data**2 plus Gaussian noise (mean 0.0, std 0.5).
    """
    n_points = 100
    xs = np.linspace(-2, 2, n_points)
    ys = xs ** 2 + np.random.normal(0.0, 0.5, size=n_points)
    return xs.reshape((-1, 1)), ys.reshape((-1, 1))
def np_dnn():
    """Fit the toy quadratic dataset with the project's NumPy-based MLP
    (two sigmoid hidden layers of 8 units) and save the training as an
    animation to img/np_dnn.mp4."""
    from utils.models import Sequential
    from utils.layers import Input, Dense
    from utils.activations import sigmoid
    from utils.optimizers import GradientDescent
    from utils.losses import mse

    def callback(model, epoch, x, y):
        # Every 100 epochs: snapshot the current fit as one animation frame
        # (scatter of the data, fitted curve, and the loss as text).
        if not epoch % 100:
            predict = model.predict(x)
            loss = model.loss(predict, y)
            line, = ax.plot(x, predict, c='b', animated=True)
            text = ax.text(1.25, 0.5, 'loss=%f' % loss)
            ims.append([sca, line, text])

    data, label = get_dataset()

    # set up the figure that the per-epoch frames are drawn onto
    fig, ax = plt.subplots()
    ims = []
    fig.set_tight_layout(True)
    sca = ax.scatter(data, label, c='r', animated=True)

    # hidden-layer widths
    unit_list = [8, 8]

    model = Sequential()
    # input layer
    model.add(Input(data.shape[1]))
    # hidden layer
    for unit in unit_list:
        model.add(Dense(unit, activation=sigmoid))
    # output layer
    model.add(Dense(label.shape[1]))

    model.compile(
        optimizer=GradientDescent(lr=.005),
        loss=mse,
    )

    model.fit(data, label,
              batch_size=64,
              epochs=10000,
              callbacks=lambda epoch: callback(model, epoch, data, label)
              )

    # assemble the collected frames into an mp4 and display the result
    ani = animation.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000, repeat=True)
    img_save_path = 'img/np_dnn.mp4'
    fps = 10
    ani.save(img_save_path, writer=Writer(fps=fps))
    plt.show()
def keras_dnn():
    """Same experiment as np_dnn, but with Keras.

    keras == 2.8.0
    tensorflow == 2.8.0
    Saves the training animation to img/keras_dnn.mp4 and shows the figure.
    """
    from tensorflow.keras.layers import Input, Dense
    from tensorflow.keras.activations import sigmoid
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.optimizers import SGD
    from tensorflow.keras.losses import mse
    from tensorflow.keras.callbacks import LambdaCallback

    xs, ys = get_dataset()

    # Figure used to collect one animation frame every 20 epochs.
    fig, ax = plt.subplots()
    frames = []
    fig.set_tight_layout(True)
    points = ax.scatter(xs, ys, c='r', animated=True)

    def record(model, epoch, x, loss):
        # Snapshot the current fit curve plus a loss readout every 20 epochs.
        if not epoch % 20:
            pred = model.predict(x)
            curve, = ax.plot(x, pred, c='b', animated=True)
            caption = ax.text(1.25, 0.5, 'loss=%f' % loss)
            frames.append([points, curve, caption])

    epoch_end_callbacks = [
        LambdaCallback(on_epoch_end=lambda epoch, logs: record(model, epoch, xs, logs['loss']))
    ]

    model = Sequential()
    model.add(Input(shape=(xs.shape[1],)))   # input layer
    for width in (8, 8):                     # two hidden layers
        model.add(Dense(width, activation=sigmoid))
    model.add(Dense(ys.shape[1]))            # output layer
    model.compile(
        optimizer=SGD(learning_rate=0.05),
        loss=mse,
    )
    # model.summary()
    model.fit(xs, ys,
              epochs=2000,
              batch_size=64,
              callbacks=epoch_end_callbacks,
              verbose=0)

    # Assemble the collected frames into a movie and write it out.
    ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=1000, repeat=True)
    ani.save('img/keras_dnn.mp4', writer=Writer(fps=10))
    plt.show()
if __name__ == '__main__':
    # Seed NumPy so the noisy dataset (and thus the demo) is reproducible.
    np.random.seed(42)
    np_dnn()
    # keras_dnn()
| [
"citisy@163.com"
] | citisy@163.com |
514dc7f9a17391be6ae9cb0b9342308cae1dda3e | d008761adfe0715495efe0cbfed9ec6c3ace2a71 | /lib/atari/helpers.py | f6e5219d30e31e484f6c1a0dcb326980d04b21f4 | [
"MIT"
] | permissive | dennybritz/reinforcement-learning | 6d95152ae44005d0aafcd7cf8008239b174b91de | 2b832284894a65eccdd82353cc446f68d100676e | refs/heads/master | 2023-08-30T02:21:09.480122 | 2022-09-20T23:57:31 | 2022-09-20T23:57:31 | 66,483,240 | 21,070 | 6,725 | MIT | 2023-07-13T09:54:43 | 2016-08-24T17:02:41 | Jupyter Notebook | UTF-8 | Python | false | false | 829 | py | import numpy as np
class AtariEnvWrapper(object):
    """
    Wraps an Atari environment to end an episode when a life is lost.
    """

    def __init__(self, env):
        self.env = env

    def __getattr__(self, name):
        # Delegate every attribute we do not override to the wrapped env.
        return getattr(self.env, name)

    def step(self, *args, **kwargs):
        before = self.env.ale.lives()
        obs, reward, done, info = self.env.step(*args, **kwargs)
        after = self.env.ale.lives()
        # Losing a life terminates the (wrapped) episode.
        done = done or after < before
        # Clip rewards to [-1, 1].
        reward = min(max(reward, -1), 1)
        return obs, reward, done, info
def atari_make_initial_state(state):
    # Repeat the first frame 4 times along a new trailing (channel) axis.
    return np.stack((state,) * 4, axis=2)
def atari_make_next_state(state, next_state):
    # Drop the oldest frame and append the new one as the last channel.
    newest = np.expand_dims(next_state, 2)
    return np.concatenate((state[:, :, 1:], newest), axis=2)
"dennybritz@gmail.com"
] | dennybritz@gmail.com |
0617a144af6f6528355b038ef45fa54c7553e498 | 51de507d6e8990aa28538ab0831721140418e4e9 | /Prodaja.py | 549f4117d7c510dd2fc8d910df781c9fd82e541c | [] | no_license | PetrekanovicBI48/Projekat.1 | 67ea827305c2b891ccb18049fa3f6de56cdf11c4 | bb1ff70299863a26874b9944d7e62f35dd03793e | refs/heads/main | 2023-02-15T21:23:54.152072 | 2021-01-04T16:30:02 | 2021-01-04T16:30:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,869 | py | from racun.racunIO import ucitaj_racun, sacuvaj_racun
from knjige.knjigeIO import ucitaj_knjige
from akcije.akcijeIO import ucitaj_akcije
from datetime import datetime
from korisnici.korisniciIO import ucitaj_korisnike
racuni = ucitaj_racun()
knjige = ucitaj_knjige()
akcije = ucitaj_akcije()
korisnici=ucitaj_korisnike()
def prodaja_knjige():
    """Interactively build a new receipt from book/discount codes on stdin.

    Reads codes and quantities until the user types 'x' (finish) or
    'nazad' (abort). Uses the module-level `racuni`, `knjige` and
    `akcije` lists loaded at import time.
    """
    korpa = []  # NOTE(review): never used after initialisation
    now = datetime.now()  # NOTE(review): computed but never stored on the receipt
    # Template receipt; 'sifra' is overwritten below, the rest are placeholders.
    novi_racun = {
        "sifra": 666,
        "prodavac": "Mirko Mirkovic",
        "datum_vreme": "2020-12-27T18:16:25.925653",
        "artikli": [], "akcije": [],
        "cena": 0.0
    }
    # Next receipt number = last stored receipt number + 1.
    novi_racun['sifra'] = racuni[-1]['sifra'] + 1
    unos_artikala = True
    while(unos_artikala):
        sifra = input("\n Unesi sifru knjige ili akcije (unesi 'nazad' za povratak u meni, unesi 'x' za prekid unosa knjiga):")
        if sifra == 'nazad':
            return
        elif sifra == 'x':
            unos_artikala = False
        else:
            # Look for the code among regular books first ...
            for knjiga in knjige:
                if knjiga['sifra'] == int(sifra):
                    kolicina = input("\n Unesi kolicinu:")
                    # NOTE(review): mutates the shared catalogue entry and
                    # stores kolicina as a string.
                    knjiga['kolicina'] = kolicina
                    novi_racun['artikli'].append(knjiga)
                    novi_racun['cena'] += knjiga['cena']*int(kolicina)
                    continue
            # ... then among the discount actions.
            for akcija in akcije:
                if akcija['sifra'] == int(sifra):
                    kolicina = input("\n Unesi kolicinu:")
                    akcija['kolicina'] = kolicina
                    novi_racun['akcije'].append(akcija)
                    novi_racun['cena'] += akcija['nova cena']*int(kolicina)
    # NOTE(review): prints the previously stored receipts, not `novi_racun`,
    # and `novi_racun` is never appended to `racuni` or saved here.
    ispis_knjiga_racun(racuni)
    ispis_akcija_racun(racuni)
# while True:
# print('\nZelite li da nastavite kupovinu?\n1. Da\n2. Odustani')
# stavka = int(input('Unesite odgovor:'))
# if stavka == 1:
# novi_racun['datum_vreme'] = now.strftime("%d.%m.%Y. %H:%M:%S")
# racuni.append(novi_racun)
# sacuvaj_racun(racuni)
# elif stavka == 2:
# return False
# else:
# print('Uneli ste pogresnu opciju. Pokusajte ponovo.')
def ispisi_racun(akcije):
    """Print a table of discount actions: one row per discounted book."""
    header = (f"{'sifra':<10}"
              f"{'naslov':<20}"
              f"{'stara cena':^20}"
              f"{'nova cena':^20}"
              f"{'datum vazenja':^20}")
    print(header)
    print("-" * len(header))
    for akcija in akcije:
        for artikal in akcija['artikli']:
            print(f"{akcija['sifra']:<10}"
                  f"{artikal['naslov']:<20}"
                  f"{artikal['cena']:<20}"
                  f"{akcija['nova cena']:<20}"
                  f"{akcija['datum_vazenja']:<20}")
#
# def prodaja_knjige():
# global korpa
# z = -1
# i = 0
# while True:
# sifra = input("\nUnesite sifru (unesite 'nazad' za povratak):")
# if (sifra == 'nazad'):
# return False
# elif (sifra != ''):
# result = pretraga_knjiga_string("sifra", ' ')
# if (result == None):
# break
# else:
# print("Sifra ne sme da sadrzi razmake, pokusajte ponovo.")
# if (prodaja_knjige() == False):
# return False
# else:
# print("Unesite sifru.")
# if (prodaja_knjige() == False):
# return False
# for knjiga in knjige:
# if (knjiga['sifra'] == sifra):
# print('Knjiga je pronadjena.')
# z = i
# break
# i += 1
# if (z == -1):
# print('Knjiga nije pronadjena, pokusajte ponovo.')
# if (prodaja_knjige() == False):
# return False
# sadrzaj_korpe=[]
# while True:
# try:
# q = int(input('Kolicina:'))
# break
# except ValueError: print('Unesite cele brojeve')
# print('Sadrzaj se dodaje u korpu:')
# for i in range(q):
# sadrzaj_korpe+= [knjige[z]]
# list(sadrzaj_korpe)
# while True:
# print('\nZelite li da nastavite kupovinu?\n1. Da\n2. Odustani')
# stavka = input('Unesite odgovor:')
# if stavka == '1':
# korpa += sadrzaj_korpe
# return True
# elif stavka == '2':
# return False
# else:
# print('Uneli ste pogresnu opciju. Pokusajte ponovo.')
def pravljenje_racuna():
    """Assemble a new receipt dict from the stored receipts and globals.

    NOTE(review): broken as written -- see the inline notes.
    """
    # Template receipt (rebound by the counting loop below whenever any
    # stored receipts exist).
    racun = {
        "sifra": 666,
        "prodavac": "Mirko Mirkovic",
        "datum_vreme": "2020-12-27T18:16:25.925653",
        "artikli": [],
        "ukupno": 0.0
    }
    stari_racun = ucitaj_racun()
    z=0
    # Count stored receipts. NOTE(review): the loop variable shadows and
    # rebinds `racun`, so the assignments below land on the *last stored*
    # receipt instead of the fresh template.
    for racun in stari_racun:
        z+=1
    racun['sifra'] = z
    # NOTE(review): `korisnici` is the list returned by ucitaj_korisnike();
    # calling .korisnicko_ime() on it raises AttributeError.
    racun['prodavac'] = korisnici.korisnicko_ime()
    racun['datum_vreme'] = datetime.now().isoformat()
    # NOTE(review): `artikli` and `ukupno` are not defined anywhere in
    # scope -- NameError at runtime; they look like missing parameters.
    racun['artikli'] = artikli
    racun['ukupno'] = ukupno
    return racun
def ispis_zaglavlja(racuni):
    """Print a receipt header (number, cashier, timestamp).

    NOTE(review): broken as written; the `racuni` argument is unused.
    """
    # NOTE(review): `int[...]` subscripts the builtin type (TypeError) and
    # `racun` is undefined here; likely meant print(racuni[-1]['sifra']).
    print('sifra racuna: '), print(int[racun['sifra']])
    # NOTE(review): `korisnici` is a list, so the string index fails too.
    print('prodavac: '), print(str[korisnici['korisnicko_ime']])
    print('datum i vreme: '), print(datetime.now().isoformat())
    print('__'*20)
def ispis_knjiga_racun(racuni):
    """Print the book lines of each receipt: title, price, quantity.

    Args:
        racuni: list of receipt dicts; each has an 'artikli' list whose
            items carry 'naslov', 'cena' and 'kolicina'.
    """
    zaglavlje = f"{'artikli':<20}" \
                f"{'cena':<20}" \
                f"{'kolicina':<20}"
    print(zaglavlje)
    print("-" * len(zaglavlje))
    # Bug fix: the old code indexed `racun[i]` (a dict subscripted with an
    # int -> KeyError) inside a redundant triple loop and leaked the row
    # string through a module-level global; iterate each receipt's article
    # list directly instead.
    for racun in racuni:
        for artikal in racun['artikli']:
            print(f"{artikal['naslov']:<20}"
                  f"{artikal['cena']:^20}"
                  f"{artikal['kolicina']:^20}")
    print("-" * len(zaglavlje))
def ispis_akcija_racun(racuni):
    """Print the discounted-book lines of each receipt.

    Args:
        racuni: list of receipt dicts; each has an 'akcije' list whose
            items carry 'naslov', 'nova cena' and 'kolicina'.
    """
    zaglavlje = f"{'artikli':<20}" \
                f"{'cena':<20}" \
                f"{'kolicina':<20}"
    print(zaglavlje)
    print("-" * len(zaglavlje))
    # Bug fix: 'akcije' is a list of discounted items, but the old code
    # treated it as a single dict (racun['akcije']['naslov'] -> TypeError)
    # and printed the receipt total in the price column. Print the action's
    # own discounted price, matching how prodaja_knjige charges it.
    for racun in racuni:
        for akcija in racun['akcije']:
            print(f"{akcija['naslov']:<20}"
                  f"{akcija['nova cena']:<20}"
                  f"{akcija['kolicina']:<20}")
    print("-" * len(zaglavlje))
def kraj_kupovine():
    """Confirm the purchase, store the new receipt and print it.

    Returns False on every path (both confirm and cancel).
    """
    racuni = ucitaj_racun()
    racun = pravljenje_racuna()
    print('\nKnjige koje su odabrane su:')
    ispis_knjiga_racun(racuni)
    ispis_akcija_racun(racuni)
    # Keep asking until the user confirms ('1') or cancels ('2').
    while True:
        print('\nZelite li da nastavite?\n1. Da\n2. Odustani')
        stavka = input('Unesite:')
        if stavka == '1':
            racuni.append(racun)
            break
        elif stavka == '2':
            return False
        else:
            print('Pogresan unos.')
    # NOTE(review): `racun` is a plain dict, so this raises AttributeError;
    # presumably `sacuvaj_racun(racuni)` was intended here.
    racun.save(racuni)
    print('Prodaja zavrsena. Izvolite racun:')
    print(racun)
    return False
# def dodavanje_akcije():
# nova_akcija = {
# "sifra": 33,
# "artikli": [{
# "sifra": 350497,
# "naslov": "Ana Karenjina",
# "autor": "Tolstoj",
# "isbn": "456372839",
# "izdavac": "Laguna",
# "broj strana": "213",
# "godina": 2020,
# "cena": 1899.1,
# "kategorija": "Roman"
# }],
# "nova cena": 1800.0,
# "datum_vazenja": "27.12.2020."
# }
# for akcija in akcije:
# sifra = akcije['sifra']
# sifra += 1
# nova_akcija['sifra'] = sifra
# akcija_knjige = []
# while True:
# prompt = 0
# breaker = 0
# sifra = input("Sacuvaj sifru (ukucaj 'nazad' za povratak):")
# if (knjige.find(sifra) != None and sifra != ''):
# knjiga1 = knjiga.find(sifra)
# print('Knjiga je pronadjena.')
# prompt = 1
# print('Knjiga se dodaje u akcije:')
# knjige = [knjiga1]
# knjige.permissions('a')
# knjige.list(knjige)
# knjige.permissions('m')
# while True:
# print('\nZelite li da nastavite\n1. Da\n2. Odustani')
# stavka = input('Unesi:')
# if stavka == '1':
# while True:
# try:
# price = float(input('New price for the book:'))
# knjiga1['cena'] = cena
# break
# except ValueError:
# print('Pogresan unos, pokusajte ponovo.')
# akcija_knjige.append(knjiga1)
# break
# elif stavka == '2':
# prompt = 0
# sifra = 'a'
# break
# else:
# print('Pogresan unos. Pokusajte ponovo.')
# elif (sifra == 'nazad'):
# return False
# else:
# print('Pogresan unos. Pokusajte ponovo.')
# if (prompt == 1):
# while True:
# print('\nZelite li da unesete jos neku knjigu u akciju\n1. Da\n2. Ne')
# stavka = input('Unesi:')
# if stavka == '1':
# break
# elif stavka == '2':
# breaker = 1
# break
# else:
# print('Pogresan unos. Pokusajte ponovo.')
# if (breaker == 1 and akcija_knjige != []): break
# nova_akcija['artikli'] = akcija_knjige
#
# while True:
# try:
# godina = int(input('Godina izdanja:'))
# izdanje = date(godina, 1, 1)
# break
# except ValueError:
# print('Pogresan unos. Pokusajte ponovo.')
# while True:
# try:
# mesec = int(input('Mesec:'))
# expiry = date(godina, mesec, 1)
# break
# except ValueError:
# print('Pogresan unos. Pokusajte ponovo.')
# while True:
# try:
# dan = int(input('Dan:'))
# expiry = date(godina, mesec, dan)
# break
# except ValueError:
# print('Pogresan unos. Pokusajte ponovo.')
# nova_akcija['izdanje'] = str(izdanje)
# print('\nAkcija se dodaje.')
# nova_akcija = [nova_akcija]
# show_valid = False
# ispisi_akcije(akcije)
#
# while True:
# print('\nZelite li da nastavite?\n1. Da\n2. Odustani')
# stavka = input('Unesi:')
# if stavka == '1':
# akcije.append(nova_akcija)
# break
# elif stavka == '2':
# return False
# else:
# print('Pogresan unos. Pokusajte ponovo.')
# save(akcije)
# print('%s je dodata u bazu podataka. Sifra akcije =[%s]' %(nova_akcija['naslov'], nova_akcija['sifra']))
# return False
| [
"kovacevic.bi34.2020@uns.ac.rs"
] | kovacevic.bi34.2020@uns.ac.rs |
830a140f3af9cb75dd17cf22df4d0529f9709007 | 8f1673c2abfed8f372e22fbd1c280486014b4466 | /nmt/embeddings/fresh_embedding_test.py | 02c77c2260cfcd5f01d846f377761ea8db571074 | [
"Apache-2.0"
] | permissive | naivenlp/naivenmt-legacy | be670df40a98c0f28bdacb2a3acf9a5b06667966 | bcceeec0a477eb09c4a8915e638a27dae6c95562 | refs/heads/master | 2021-10-27T02:55:33.160837 | 2019-04-15T14:39:06 | 2019-04-15T14:39:06 | 118,464,831 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import tensorflow as tf
import numpy as np
from nmt.embeddings.fresh_embedding import FreshEmbedding
from nmt import misc_utils
class FreshEmbeddingTest(tf.test.TestCase):

    def testFreshEmbedding(self):
        """Embed one 4-token sentence and print the resulting tensor."""
        vocab_path = misc_utils.get_test_data('iwslt15.vocab.100.en')
        embedder = FreshEmbedding(vocab_file=vocab_path)
        tokens = tf.constant(np.array([['I', 'am', 'a', 'test']]), dtype=tf.string)
        lengths = tf.constant(np.array([4]), dtype=tf.int32)
        embedded = embedder.embedding(tokens, lengths, {'batch_size': 1})
        with self.test_session() as sess:
            # Variables and the vocab lookup table must be initialised
            # before the embedding op can run.
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            print(sess.run(embedded))
if __name__ == '__main__':
tf.test.main()
| [
"zhouyang.luo@gmail.com"
] | zhouyang.luo@gmail.com |
6afdae640dd9ad3d9adbf1cbc0c7d8cf8b7d3466 | 491c1e520a64e3ebd5349130f35047aaed1e70ec | /two pointer/680 validPalindrome.py | 3ccf25be7a1c357ec82bfd31b9cc88e976d594fb | [] | no_license | pangyouzhen/data-structure | 33a7bd7790c8db3e018114d85a137f5f3d6b92f8 | cd46cf08a580c418cc40a68bf9b32371fc69a803 | refs/heads/master | 2023-05-26T12:02:30.800301 | 2023-05-21T08:07:57 | 2023-05-21T08:07:57 | 189,315,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | class Solution:
def validPalindrome(self, s: str) -> bool:
def checkPalindrome(low, high):
i, j = low, high
while i < j:
if s[i] != s[j]:
return False
i = i + 1
j = j - 1
return True
low, high = 0, len(s) - 1
while low < high:
if s[low] == s[high]:
low = low + 1
high = high - 1
else:
return checkPalindrome(low + 1, high) or checkPalindrome(low, high - 1)
return True
# Quick manual check of the solution on two removable-character cases.
sol = Solution()
print(sol.validPalindrome("abca"))  # True: drop 'b' or 'c'
assert sol.validPalindrome("abca") == True
print(sol.validPalindrome("abcca"))  # True: drop the 'b'
| [
"pangyouzhen@live.com"
] | pangyouzhen@live.com |
87116af50b6c30c374ade0f7be56252673c0f14a | 9101d9e1a4e9ea23f8043090704ada6f87295bfe | /optunity/search_spaces.py | 9227b2377eb2a7539a95bd41d2cc35ebb0ea1362 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | gitter-badger/optunity | 2edacb3dfcd9cb21f720ccbd36488e2009637259 | 4094650aeb1bf00506e1f2acf383b5c8f7d596b5 | refs/heads/master | 2020-07-13T05:37:46.615450 | 2015-07-17T17:09:51 | 2015-07-17T17:09:51 | 39,302,683 | 0 | 0 | null | 2015-07-18T15:24:17 | 2015-07-18T15:24:17 | null | UTF-8 | Python | false | false | 11,445 | py | #! /usr/bin/env python
# Copyright (c) 2014 KU Leuven, ESAT-STADIUS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Functionality to deal with exotic search spaces, with conditional hyperparameters.
A search space is defined as a dictionary mapping strings to nodes. Each node is one of the following:
1. A new sub search space, that is a new dictionary with the same structure.
2. 2-element list or tuple, containing (lb, ub) for the associated hyperparameter.
3. None, to indicate a terminal node that has no numeric value associated to it.
A simple example search space is that for SVM, where we want to optimize the kernel function and (optionally)
its hyperparameterization.
.. code:
search = {'kernel': {'linear': None,
'rbf': {'gamma': [0, 1]},
'poly': {'degree': [2, 4]}
},
'c': [0, 1]
}
Alternatively, it is possible to optimize the regularization parameter 'c' for every choice of kernel separately,
as its required range may be different.
.. code:
search = {'kernel': {'linear': {'c': [0, 1]},
'rbf': {'gamma': [0, 1], 'c': [0, 2]},
'poly': {'degree': [2, 4], 'c': [0, 3]}
}
}
Main features in this module:
TODO
* :func:`logged`
* :func:`max_evals`
.. moduleauthor:: Marc Claesen
"""
import functools
import itertools
import collections
import math
DELIM = '|'
class Options(object):
    """An ordered collection of discrete choices for one hyperparameter."""

    def __init__(self, cases):
        self._cases = cases

    @property
    def cases(self):
        return self._cases

    def __iter__(self):
        return iter(self._cases)

    def __repr__(self):
        return "{%s}" % ", ".join(self._cases)

    def __len__(self):
        return len(self._cases)

    def __getitem__(self, idx):
        return self._cases[idx]
class Node(object):
    """Models a node within a search space.
    Nodes can be internal or terminal, and may or may not be choices.
    A choice is a node that models a discrete choice out of k > 1 options.
    """
    def __init__(self, key, value):
        # key: the hyperparameter name; value: a nested dict (internal
        # node), a [lb, ub] box (terminal) or None (terminal choice leaf).
        self._key = key
        if type(value) is dict:
            # Sort children for a deterministic order, then wrap each one.
            self._value = [Node(k, v) for k, v in sorted(value.items())]
        else: self._value = value
    @property
    def key(self): return self._key
    @property
    def value(self): return self._value
    @property
    def terminal(self):
        """Returns whether or not this Node is terminal.
        A terminal node has a non-dictionary value (numeric, list or None).
        """
        if self.value:
            # Internal nodes hold a list of child Node objects.
            return not type(self.value[0]) == type(self)
        return True
    @property
    def choice(self):
        """Determines whether this node is a choice.
        A choice is a node that models a discrete choice out of k > 1 options.
        """
        return self.value is None
    def __iter__(self):
        """Iterates over this node.
        If the node is terminal, yields the key and value.
        Otherwise, first yields all values and then iterates over the values.
        """
        if self.terminal:
            yield self.key, self.value
        else:
            # Flatten all (path, value) pairs produced by the children.
            value = list(itertools.chain(*self.value))
            if any([not x.terminal or x.choice for x in self.value]):
                # Some child is itself structured or a choice leaf: prepend
                # an Options entry so this level is modelled as a discrete
                # choice over the child keys.
                value.insert(0, ([], Options([node.key for node in self.value])))
            for k, v in value:
                # Prefix this node's key onto every child path.
                if type(k) is list: key = [self.key] + k
                else: key = [self.key, k]
                yield key, v
class SearchTree(object):
    """Tree representation of a (possibly conditional) search space.

    The constructor takes a nested-dict space description. ``to_box``
    flattens it into plain box constraints whose keys are the
    DELIM-joined paths; ``decode`` maps a flat assignment over those
    keys back to the original, conditional hyperparameter assignment
    (unused branches are reported as None so call logs stay complete).
    """

    def __init__(self, d):
        self._content = [Node(k, v) for k, v in sorted(d.items())]
        self._vectordict = collections.OrderedDict()
        self._vectorcontent = collections.OrderedDict()

    @property
    def vectordict(self):
        return self._vectordict

    @property
    def vectorcontent(self):
        return self._vectorcontent

    @property
    def content(self):
        return self._content

    def __iter__(self):
        # Flattened traversal: every (path, value) pair of every root node.
        for node in self._content:
            for path, value in node:
                yield path, value

    def to_box(self):
        """Flatten the tree into box constraints (computed once, then cached)."""
        if not self.vectordict:
            for path, value in self:
                flat_key = DELIM.join(path)
                if type(value) is Options:
                    # A 1-option choice is no choice at all: skip it.
                    if len(value) > 1:
                        self.vectordict[flat_key] = [0.0, float(len(value))]
                        self.vectorcontent[flat_key] = value
                elif value is None:
                    pass
                else:
                    self.vectordict[flat_key] = value
                    self.vectorcontent[flat_key] = value
        return dict([(k, v) for k, v in self.vectordict.items()])

    def decode(self, vd):
        """Map a flat {path: value} assignment back to conditional parameters."""
        result = {}
        nesting = []  # alternating [choice_key, chosen_option, ...] pairs
        entries = sorted(vd.items())
        pos = 0
        while pos < len(entries):
            flat_key, value = entries[pos]
            path = flat_key.split(DELIM)
            if nesting and len(path) >= len(nesting):
                if any(a != b for a, b in zip(nesting, path)):
                    # This key belongs to a branch that was not chosen:
                    # record None (keeps call logs functional) and skip it.
                    pos += 1
                    leaf = path[-1]
                    if leaf not in result:
                        result[leaf] = None
                    continue
            elif nesting:
                # Path is shorter than the current nesting: pop one level.
                nesting = nesting[:-2]
                continue
            content = self.vectorcontent[flat_key]
            if type(content) is Options:
                # The integer part of the value picks the option partition.
                chosen = content[int(math.floor(value))]
                result[DELIM.join(path[len(nesting):])] = chosen
                nesting.extend([path[-1], chosen])
            else:
                result[path[-1]] = value
            pos += 1
        return result

    def wrap_decoder(self, f):
        """Wraps a function to automatically decode arguments based on given SearchTree."""
        @functools.wraps(f)
        def wrapped(**kwargs):
            return f(**self.decode(kwargs))
        return wrapped
#hpars = {'kernel': {'linear': {'c': [0, 1]},
# 'rbf': {'gamma': [0, 1], 'c': [0, 10]},
# 'poly': {'degree': [2, 4], 'c': [0, 2]}
# }
# }
#hpars = {'algorithm': {'k-nn': {'k': [1, 10]},
# 'SVM': {'kernel': {'linear': {'C': [0, 2]},
# 'rbf': {'gamma': [0, 1], 'C': [0, 10]},
# 'poly': {'degree': [2, 5], 'C': [0, 50], 'coef0': [0, 1]}
# }
# },
# 'naive-bayes': None,
# 'random-forest': {'n_estimators': [100, 300], 'max_features': [5, 100]}
# }
# }
#hpars = {'kernel': {'linear': None,
# 'rbf': {'gamma': [0, 1]},
# 'poly': {'degree': [2, 4]}
# },
# 'c': [0, 1]
# }
#hpars = {'kernel': {'linear': {'c': [0, 1]},
# 'rbf': {'gamma': [0, 1], 'c': [0, 10]},
# 'poly': {'degree': [2, 4], 'c': [0, 2]},
# 'choice': {'choice1': None, 'choice2': None}
# }
# }
#tree.decode(v2)
#tree = SearchTree(hpars)
#l = list(tree)
#print('============================')
#print('list')
#print("\n".join(map(str, l)))
#v = tree.to_box()
#print('============================')
#print('box')
#print("\n".join(map(str, v.items())))
#v2 = v.copy()
#v2['kernel'] = 3.5
#v2['kernel-choice'] = 0.2
| [
"claesenm@gmail.com"
] | claesenm@gmail.com |
f24c4049415d4f03f01b2917ce79cf044672f535 | c7495a8cd3f04b703b97e23fea129e3c78e55c33 | /crawler.py | 464dca74d5c15af902ce7a2cac0de304d36452c8 | [] | no_license | zhuyul/DoubanCrawler | 8ca0ed164d3fb195552a2789e4052259fb48b602 | 7df36274031fa195671555922650505022973046 | refs/heads/master | 2020-04-13T23:28:55.753983 | 2018-12-31T15:16:04 | 2018-12-31T15:16:04 | 163,507,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,062 | py | # encoding:utf-8
from selenium import webdriver
import csv
def main():
    """Entry point: crawl a Douban user's movie comments into a CSV file."""
    options = webdriver.ChromeOptions()
    # Run Chrome without a visible window.
    options.add_argument("headless")
    driver = webdriver.Chrome(chrome_options=options)
    url = inputURL(driver)
    print("Fetching data ...")
    commentDict = crawler(driver, url)
    writeToCsv(commentDict)
    print("Done")
def inputURL(driver):
    """Prompt for a Douban user name until their collection page loads.

    Returns the URL of the first page that contains movie items.
    NOTE(review): if the driver already shows items on entry, the loop
    body never runs and `url` is unbound (NameError) -- confirm callers
    always start from a blank driver.
    """
    while not driver.find_elements_by_class_name("item"):
        user = input("Type Douban user name: ")
        url = "https://movie.douban.com/people/" + user + "/collect"
        driver.get(url)
    return url
def crawler(driver, url):
    """Walk every page of the user's collection, collecting {title: comment}."""
    commentDict = {}
    endPage = False
    while not endPage:
        driver.get(url)
        length = len (driver.find_elements_by_class_name("item"))
        for i in range (0, length):
            # Re-query each iteration: the element list can go stale.
            movie_list = driver.find_elements_by_class_name("item")
            movie = movie_list[i]
            # NOTE(review): rstrip removes any trailing chars drawn from
            # this set, which can also eat title characters -- verify.
            title = movie.find_element_by_css_selector("[class='title']").text.rstrip(" [可播放]")
            fetchComments(movie, title, commentDict)
        try:
            # Follow the "next page" link until there is none.
            url = driver.find_element_by_css_selector("[rel='next']").get_attribute("href")
        except:
            endPage = True
    driver.close()
    return commentDict
def fetchComments(movie, title, commentDict):
    """Store the movie's comment text under *title*; skip movies without one."""
    try:
        commentDict[title] = movie.find_element_by_css_selector("[class='comment']").text
    except:
        # Selenium raises when the comment element is missing; ignore it.
        pass
def writeToCsv(data):
    """Dump {title: comment} pairs to UserComments.csv with a header row."""
    with open("UserComments.csv", "w", newline="") as handle:
        rows = csv.writer(handle)
        rows.writerow(["Title", "Comment"])
        rows.writerows([title, comment] for title, comment in data.items())
# NOT USED
# fetch director and genre info on each movie page
def fetchMoviePage(driver, title, movie, genreDict, directorDict):
    """Visit a movie's own page and tally its directors and genres.

    Increments counters in directorDict / genreDict, then navigates back.
    The `title` argument is unused. (File marks this helper as NOT USED.)
    """
    movieURL = movie.find_element_by_css_selector("[class='title']").find_element_by_tag_name("a").get_attribute('href')
    driver.get(movieURL)
    movieInfo = driver.find_elements_by_class_name("attrs")
    try:
        # First "attrs" block holds the director names, separated by ' / '.
        directors = movieInfo[0].text
        director = directors.split(' / ')
        for di in director:
            if di not in directorDict:
                directorDict[di] = 1
            else:
                directorDict[di] += 1
        movieGenres = driver.find_elements_by_css_selector("[property='v:genre']")
        for movieGenre in movieGenres:
            genre = movieGenre.text
            if genre not in genreDict:
                genreDict[genre] = 1
            else:
                genreDict[genre] += 1
        driver.back()
    except:
        # On any scrape failure still navigate back to the list page.
        driver.back()
def findTop(myDict):
    """Drop the empty-string key, coerce counts to int, return the max (key, count).

    Mutates *myDict* in place; raises KeyError if '' is absent.
    """
    del myDict['']
    for name in list(myDict):
        myDict[name] = int(myDict[name])
    return max(myDict.items(), key=lambda item: item[1])
def fetchUserFiveStarMovie(movie, title, movieURL, fiveStarMoviesDict):
    """Record *movieURL* under *title* when the movie shows a 5-star badge."""
    try:
        badge = movie.find_element_by_css_selector("[class='rating5-t']")
    except:
        # Selenium raises when the badge is absent: not a 5-star movie.
        return
    if badge:
        fiveStarMoviesDict[title] = movieURL
if __name__ == '__main__':
main() | [
"zhuyul@uci.edu"
] | zhuyul@uci.edu |
497d9f49fe787e753c65fc9bdd2556294660243e | 72cf9558c7b33c1d67a597d4f67882303b2ec76c | /TerminalGames/find_the_name.py | de52980efc183340cd86d1cf24996fd82b425b20 | [] | no_license | PRATHAM1ST/Python-Projects | 127a4a98afc1f9f2036fecda99cf57d52131e094 | f4b99f55cfb2bc8da65d6c2968583e561f3393c7 | refs/heads/main | 2023-05-02T10:16:53.307763 | 2021-05-26T17:14:54 | 2021-05-26T17:14:54 | 371,111,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,832 | py | from random import *
################################################################################
def split(word):
    """Return the characters of *word* as a list."""
    return list(word)
def seperator(list1, list2):
    """Collect the consonants of *list1* into *list2*, duplicates moved last.

    Vowels, spaces and '|' are skipped. After the scan, characters that
    occur more than once are removed and re-appended once at the end.
    NOTE(review): the pairwise scan below is hard to follow -- for the
    reachable cases both branches append and break identically, so a
    plain consonant filter was presumably intended; confirm before
    simplifying.
    """
    for i in range(len(list1)):
        for a in range(len(list1)):
            if i <= a:
                # Both positions must be consonants (not vowel/space/'|').
                if list1[i] != 'A' and list1[i] != 'E' and list1[i] != 'O' and list1[i] != 'I' and list1[i] != 'U' and list1[i] != ' ' and list1[i] != '|' and list1[a] != 'A' and list1[a] != 'E' and list1[a] != 'O' and list1[a] != 'I' and list1[a] != 'U' and list1[a] != ' ' and list1[a] != '|':
                    if list1[i] == list1[a]:
                        list2.append(list1[i])
                        break
                    else:
                        list2.append(list1[a])
                        break
            if i == 0:
                if list1[i] != 'A' and list1[i] != 'E' and list1[i] != 'O' and list1[i] != 'I' and list1[i] != 'U' and list1[i] != ' ' and list1[i] != '|' and list1[a] != 'A' and list1[a] != 'E' and list1[a] != 'O' and list1[a] != 'I' and list1[a] != 'U' and list1[a] != ' ' and list1[a] != '|':
                    if list1[i] == list1[a]:
                        list2.append(list1[i])
                        break
                    else:
                        list2.append(list1[i])
                        break
    # Pull duplicated characters out and re-append one copy of each.
    LIST_1 = duplicate_finder(list2)
    for i in range(len(LIST_1)):
        remover(list2, LIST_1[i])
    list2 += LIST_1
def duplicate_finder(list):
    """Return each value occurring more than once in *list*, once each.

    Order follows the first appearance of each duplicate. Rewritten from
    an index-juggling version whose hand-rolled `while (x + 1)` loops
    kept incrementing past len() with no exit condition, so they could
    spin forever once the index overshot the shrinking list.

    Args:
        list: sequence to scan (name kept for interface compatibility,
            although it shadows the builtin).
    """
    LIST = []
    for i, value in enumerate(list):
        # A duplicate has a later equal element; record it only once.
        if value in list[i + 1:] and value not in LIST:
            LIST.append(value)
    return LIST
def remover(list, input):
    """Remove every occurrence of *input* from *list*, in place.

    The previous hand-rolled while loop kept incrementing its index past
    the shrinking list with no exit condition (infinite loop once the
    index overshot len()) and bailed out at the first non-matching
    element, so scattered occurrences were never removed.

    Args:
        list: list to mutate (name kept for interface compatibility).
        input: value whose occurrences are removed.
    """
    while input in list:
        list.remove(input)
def rules():
    """Print the game rules, framed by blank lines."""
    for line in (
        '',
        'Rules Are Simple, You Will Be Provided By:',
        '1. Question With Gaps And Vowels Written.',
        '2. You Just Have To Write A Letter Or The Number Which You Have Gussed.',
        '3. NINE Tries Will Be Given To You.',
        '4. At Last TWO Free Hints Are Given To You To Find The Name. For Applying Hint Type Hint.',
        '5. Each Time You Write Hint No. Of Hints Get Deducted, Even If You Have Used It Earlier. So Try To Avoid Such Things.',
        '6. Be Careful In Typing, As It May Cause You To Loose The No. Of Tries.',
        '',
    ):
        print(line)
def movies_list_type():
    """Print the available movie categories and how to select them."""
    menu_lines = (
        '',
        'TYPES OF MOVIES AVILABLE, For Applying Any One Them Write Their Corresponding Letter',
        'BOLLYWOOD = b HOLLYWOOD = h TOLLYWOOD = t',
        "MARVEL = m DC = d EARLY 90'S = e",
        'You Can Even Add By TYping It Without Space.',
        '',
    )
    for line in menu_lines:
        print(line)
################################################################################
# Top-level game driver (original nesting reconstructed — indentation was lost
# in this copy; TODO confirm against the original file).
print(' Welcome To The World Of Finding Movies Names')
print(' So Here We Begins!')
print(' ENJOY AND GOOD LUCK!')
print('')
################################################################################
# Movie pools, one list per selectable category.
BOLLYWOOD = ['PANGA', 'STREET DANCER 3D', 'LAAL KAPTAAN', 'DABANGG 3', 'TANHAJI THE UNSUNG WARRIOR', 'GOOD NEWWZ', 'ANDHADHUN', 'ARTICLE 15', 'BAADSHAHO', 'BADHAAI HO', 'BALA', 'DREAM GIRL', 'GULLY BOY', 'JUDGEMENTALL HAI KYA', 'KESARI', 'PAGALPANTI', 'PATI PATNI AUR WOH', 'SAAHO', 'SIMMBA', 'WAR', 'JAWAANI JAANEMAN', 'MALANG', 'CHHAPAAK', 'PANIPAT', 'COMMANDO 3', 'MARJAAVAAN', 'HOUSEFULL 4', 'MADE IN CHINA', 'THE ACCIDENTAL PRIME MINISTER', 'TOTAL DHAMAAL', 'LUKA CHUPPI', 'SONCHIRIYA', 'BADLA', 'NOTEBOOK', 'GONE KESH', 'ROMEO AKBAR WALTER', 'KALANK', 'BLANK', 'STUDENT OF THE YEAR 2', 'DE DE PYAAR DE', 'BHARAT', 'KABIR SINGH', 'ARTICLE 15', 'SUPER 30', 'BATLA HOUSE', 'MISSION MANGAL', 'SAAHO', 'CHHICHHORE', 'DREAM GIRL', 'SECTION 375', 'THE ZOYA FACTOR', 'PAL PAL DIL KE PAAS', 'PRASSTHANAM', 'THE SKY IS PINK', 'SAAND KI AANKH', 'MADE IN CHINA', 'DRIVE', 'UJDA CHAMAN', 'MARDAANI 2', 'BAAGHI 2']
HOLLYWOOD = ['FAST AND FURIOUS']
DC = ['DARK KNIGHT']
TOLLYWOOD = []
EARLY_90s = []
################################################################################
# Pool the game will actually draw from, built from the user's category picks.
MOVIES = []
################################################################################
# Category selection: 'RULES'/'LIST' print help; otherwise each letter of the
# answer selects a pool (e.g. 'BH' mixes Bollywood and Hollywood).
CHOICE = input('WHICH TYPE OF MOVIE YOU WOULD LIKE TO HAVE : ')
CHOICE = CHOICE.upper()
if CHOICE == 'RULES':
    rules()
elif CHOICE == 'LIST':
    movies_list_type()
else:
    CHOICE = split(CHOICE)  # split() is defined earlier in this file
    for c in range(len(CHOICE)):
        if CHOICE[c] == 'B':
            MOVIES += BOLLYWOOD
        elif CHOICE[c] == 'H':
            MOVIES += HOLLYWOOD
        elif CHOICE[c] == 'T':
            MOVIES += TOLLYWOOD
        elif CHOICE[c] == 'M':
            # NOTE(review): MARVEL is never defined in this excerpt — picking
            # 'm' raises NameError; confirm it exists above or define it.
            MOVIES += MARVEL
        elif CHOICE[c] == 'D':
            MOVIES += DC
        elif CHOICE[c] == 'E':
            MOVIES += EARLY_90s
        else:
            print('YOU HAVE WRITTEN IT WRONG, TRY AGAIN BY REFRESHING IT PAGE')
################################################################################
# Pick the answer at random.
# NOTE(review): randint is assumed imported above this excerpt; if MOVIES is
# still empty (RULES/LIST path or a bad category) randint(0, -1) raises.
SELECTION = randint(0, len(MOVIES)-1)
SELECTED_MOVIE = MOVIES[SELECTION]
SPLIT = split(SELECTED_MOVIE)
print(SELECTED_MOVIE)  # NOTE(review): prints the answer — debug leftover?
QUESTION = []
ENTERIES_NEEDED = []
TRIES_LEFT = 9
CORRECT = 0
HINTS_LEFT = 2
################################################################################
# Collect the distinct consonants/digits the player still has to guess.
seperator(SPLIT, ENTERIES_NEEDED)
################################################################################
print('')
print('NO. OF TRIES LEFT:', TRIES_LEFT)
print('NO. OF HINTS LEFT:', HINTS_LEFT)
print('')
################################################################################
# Render the initial board: vowels shown, spaces drawn as '|', the rest '_'.
# SPLIT is also rewritten so spaces become '|' (matches QUESTION for the
# win comparison below).
for i in range(len(SELECTED_MOVIE)):
    if SELECTED_MOVIE[i] == 'A' or SELECTED_MOVIE[i] == 'E' or SELECTED_MOVIE[i] == 'I' or SELECTED_MOVIE[i] == 'O' or SELECTED_MOVIE[i] == 'U':
        print(SELECTED_MOVIE[i], end = ' ')
        QUESTION.append(SELECTED_MOVIE[i])
    elif SELECTED_MOVIE[i] == ' ':
        print('| ', end = '')
        QUESTION.append('|')
        SPLIT[i] = '|'
    else:
        print('_ ', end = '')
        QUESTION.append('_')
print('')
################################################################################
# Main guess loop: runs until the board matches the answer or tries run out.
INPUT = []
while TRIES_LEFT:
    USER = input('WRITE YOUR GUESS : ')
    USER = USER.upper()
    INPUT.append(USER)
    # Vowels and spaces are free — refuse them as guesses.
    if USER != 'A' and USER != 'E' and USER != 'I' and USER != 'O' and USER != 'U' and USER != ' ':
        if USER == 'HINT':
            if HINTS_LEFT != 0:
                # Reveal the first still-needed character.
                print('YOUR HINT IS {} NOW YOU HAVE {} HINTS LEFT!, LETS ROCK IT BRO!'.format(ENTERIES_NEEDED[0], HINTS_LEFT - 1))
                HINTS_LEFT -= 1
            else:
                print('SORRY YOU CANNOT USE MORE THAN 3 HINTS, TRY HARD NAD DONT LOOSE HOPE')
        if QUESTION != SPLIT:
            # Count how many board positions this guess fills.
            for i in range(len(SELECTED_MOVIE)):
                if USER == SELECTED_MOVIE[i]:
                    remover(ENTERIES_NEEDED, USER)
                    CORRECT += 1
            if CORRECT != 0:
                # Correct guess: reveal every matching position and redraw.
                for i in range(len(SELECTED_MOVIE)):
                    if USER == SELECTED_MOVIE[i] and USER != ' ':
                        QUESTION[i] = USER
                for a in QUESTION:
                    print(a, end = ' ')
                print('')
                CORRECT = 0
            elif USER != 'HINT':
                # Wrong guess (hints are free): burn a try.
                TRIES_LEFT -= 1
                print('YOU HAVE', TRIES_LEFT, ' TRIES LEFT, HURRY UP TRY HARD!')
                print('')
        if QUESTION == SPLIT:
            print('')
            print('YOU HAVE CRACKED THE MOVIE, HURRAY! BRO PARTY!')
            break
        if TRIES_LEFT == 0:
            print('SORRY YOU HAVE RUN OUT OF TRIES!, BETTER LUCK NEXT TIME AND COME PREPARED WELL!')
            print('')
            print('THE MOVIE IS', MOVIES[SELECTION])
            break
        print('')
    else:
        print('SORRY VOWELS OR SPACES ARE NOT ALLOWED, GROW UP BUDDY!')
        print('')
################################################################################
################################################################################
| [
"noreply@github.com"
] | noreply@github.com |
05f8026f429941abdd6ce606b334f295694c5f27 | 72f026518a27bab1d7d260914fc366cdb8559a6f | /scripts/setup.py | c4e8832eeb5e9b9f9dd28a8dbccbd2d863940b42 | [
"MIT"
] | permissive | wenlien/pyre-check | 30ca42404740517a911fba9b2e786aef38672d77 | 5d97637bacac25f0ca7659163a8617dae1c43f0e | refs/heads/master | 2023-07-06T07:39:34.156671 | 2023-06-25T17:22:01 | 2023-06-25T17:22:01 | 133,370,640 | 1 | 0 | null | 2018-05-14T14:09:04 | 2018-05-14T14:09:04 | null | UTF-8 | Python | false | false | 13,159 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This script provides a the logic used to bootstrap a local opam
switch for building Pyre by collecting all dependencies, as well
as how to configure opam and then invoke dune for various flavors
of builds.
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
from enum import Enum
from pathlib import Path
from subprocess import CalledProcessError
from tempfile import mkdtemp
from typing import Dict, List, Mapping, NamedTuple, Optional, Type
LOG: logging.Logger = logging.getLogger(__name__)

# Pinned OCaml compiler version for the opam switch.
COMPILER_VERSION = "4.14.0"
# Exact opam package pins ("name.version") Pyre needs to build; installed
# into the switch by Setup.initialize_opam_switch /
# set_opam_switch_and_install_dependencies.
DEPENDENCIES = [
    "base64.3.5.1",
    "core.v0.15.1",
    "core_unix.v0.15.2",
    "re2.v0.15.0",
    "dune.3.7.1",
    "yojson.2.0.2",
    "jsonm.1.0.2",
    "ppx_deriving_yojson.3.7.0",
    "ppx_yojson_conv.v0.15.1",
    "ounit2.2.2.7",
    "menhir.20220210",
    "lwt.5.6.1",
    "lwt_ppx.2.1.0",
    "ounit2-lwt.2.2.7",
    "pyre-ast.0.1.9",
    "mtime.1.4.0",
    "errpy.0.0.8",
]
class OCamlbuildAlreadyInstalled(Exception):
    """Raised when a system-wide ocamlbuild already exists next to ocamlc
    (see Setup.check_if_preinstalled)."""

    pass
class OldOpam(Exception):
    """Raised when the installed opam is older than 2.x
    (see Setup.validate_opam_version)."""

    pass
class BuildType(Enum):
    """Flavor of the dune build: open-source ("external") or FB-internal
    ("facebook"); the value is substituted for %VERSION% in dune.in."""

    EXTERNAL = "external"
    FACEBOOK = "facebook"
def _custom_linker_option(pyre_directory: Path, build_type: BuildType) -> str:
    """Return the extra linker options to splice into the dune file.

    HACK: temporary workaround for inconsistent OS installations in
    FB-internal CI; can be removed once all fleets are upgraded. Only the
    facebook build on Linux gets custom options — everyone else gets "".
    """
    if build_type != BuildType.FACEBOOK or sys.platform != "linux":
        return ""
    options_file = pyre_directory / "facebook" / "scripts" / "custom_linker_options.txt"
    return options_file.read_text().rstrip()
class Setup(NamedTuple):
    """Orchestrates opam-switch bootstrap and dune builds for Pyre.

    `opam_root` is where opam state lives; `release` selects the flambda
    compiler variant and the `make release` targets.
    """

    opam_root: Path
    release: bool = False

    def switch_name(self) -> str:
        """Name of the opam switch: release builds use the flambda variant."""
        return f"{COMPILER_VERSION}+flambda" if self.release else COMPILER_VERSION

    def compiler_specification(self) -> str:
        """
        Command-line argument to set the compiler version in `opam switch create ...`

        The format for how to specify this changed in 4.12.0, see
        https://discuss.ocaml.org/t/experimental-new-layout-for-the-ocaml-variants-packages-in-opam-repository/6779
        """
        if not self.release:
            return COMPILER_VERSION
        else:
            return ",".join(
                [
                    f"--packages=ocaml-variants.{COMPILER_VERSION}+options",
                    "ocaml-options-only-flambda",
                ]
            )

    @property
    def environment_variables(self) -> Mapping[str, str]:
        # Base environment inherited by every subprocess this class spawns.
        return os.environ

    def produce_dune_file(
        self, pyre_directory: Path, build_type: Optional[BuildType] = None
    ) -> None:
        """Render source/dune from source/dune.in, substituting the build
        flavor and custom linker options. Auto-detects FACEBOOK when a
        `facebook/` directory exists and no build_type is given."""
        if not build_type:
            if (pyre_directory / "facebook").is_dir():
                build_type = BuildType.FACEBOOK
            else:
                build_type = BuildType.EXTERNAL
        with open(pyre_directory / "source" / "dune.in") as dune_in:
            with open(pyre_directory / "source" / "dune", "w") as dune:
                dune_data = dune_in.read()
                dune.write(
                    dune_data.replace("%VERSION%", build_type.value).replace(
                        "%CUSTOM_LINKER_OPTION%",
                        _custom_linker_option(pyre_directory, build_type),
                    )
                )

    def check_if_preinstalled(self) -> None:
        """Abort (OCamlbuildAlreadyInstalled) if a system OCaml already ships
        ocamlbuild — opam's install would refuse. Bypass by setting
        CHECK_IF_PREINSTALLED=false."""
        if self.environment_variables.get(
            "CHECK_IF_PREINSTALLED"
        ) != "false" and shutil.which("ocamlc"):
            ocamlc_location = self.run(["ocamlc", "-where"])
            test_ocamlbuild_location = Path(ocamlc_location) / "ocamlbuild"
            if test_ocamlbuild_location.is_dir():
                LOG.error(
                    "OCamlbuild will refuse to install since it is already "
                    + f"present at {test_ocamlbuild_location}."
                )
                LOG.error("If you want to bypass this safety check, run:")
                LOG.error("CHECK_IF_PREINSTALLED=false ./scripts/setup.sh")
                raise OCamlbuildAlreadyInstalled

    def already_initialized(self) -> bool:
        """True if the opam root directory already exists on disk."""
        return Path(self.opam_root.as_posix()).is_dir()

    def validate_opam_version(self) -> None:
        """Raise OldOpam unless `opam --version` reports a 2.x release."""
        version = self.run(["opam", "--version"])
        if version[:1] != "2":
            LOG.error(
                "Pyre only supports opam 2.0.0 and above, please update your "
                + "opam version."
            )
            raise OldOpam

    def opam_environment_variables(self) -> Dict[str, str]:
        """Parse `opam env` output into a dict of environment variables."""
        LOG.info("Activating opam")
        opam_env_result = self.run(
            [
                "opam",
                "env",
                "--yes",
                "--switch",
                self.switch_name(),
                "--root",
                self.opam_root.as_posix(),
                "--set-root",
                "--set-switch",
            ]
        )
        opam_environment_variables: Dict[str, str] = {}
        # `opam env` produces lines of two forms:
        # - comments like ": this comment, starts with a colon;"
        # - lines defining and exporting env vars like "ENV_VAR=value; export ENV_VAR;"
        for line in opam_env_result.split("\n"):
            if not line.startswith(":"):
                environment_variable, quoted_value = line.split(";")[0].split("=")
                value = quoted_value[1:-1]
                LOG.info(f'{environment_variable}="{value}"')
                opam_environment_variables[environment_variable] = value
        return opam_environment_variables

    def initialize_opam_switch(self) -> Mapping[str, str]:
        """Create the opam root and switch from scratch and install all
        pinned DEPENDENCIES; returns the resulting opam environment."""
        self.check_if_preinstalled()

        self.validate_opam_version()
        self.run(
            [
                "opam",
                "init",
                "--bare",
                "--yes",
                "--disable-sandboxing",
                "--root",
                self.opam_root.as_posix(),
                "default",
                "https://opam.ocaml.org",
            ]
        )
        self.run(["opam", "update", "--root", self.opam_root.as_posix()])
        self.run(
            [
                "opam",
                "switch",
                "create",
                self.switch_name(),
                self.compiler_specification(),
                "--yes",
                "--root",
                self.opam_root.as_posix(),
            ]
        )
        opam_environment_variables = self.opam_environment_variables()

        opam_install_command = ["opam", "install", "--yes"]

        if sys.platform == "linux":
            # setting `--assume-depexts` means that opam will not require a "system"
            # installed version of Rust (e.g. via `dnf`` or `yum`) but will instead
            # accept a version referenced on the system `$PATH`
            opam_install_command.append("--assume-depexts")

        self.run(
            opam_install_command + DEPENDENCIES,
            add_environment_variables=opam_environment_variables,
        )
        return opam_environment_variables

    def set_opam_switch_and_install_dependencies(self, rust_path: Optional[Path]) -> Mapping[str, str]:
        """Select the existing switch, optionally prepend a Rust toolchain to
        PATH, and (re)install the pinned DEPENDENCIES; returns the opam
        environment used."""
        self.run(
            [
                "opam",
                "switch",
                "set",
                self.switch_name(),
                "--root",
                self.opam_root.as_posix(),
            ]
        )

        environment_variables = self.opam_environment_variables()
        if rust_path is not None:
            environment_variables["PATH"] = str(rust_path) + ":" + environment_variables["PATH"]

        opam_install_command = ["opam", "install", "--yes"]

        if sys.platform == "linux":
            # osx fails on sandcastle with exit status 2 (illegal argument) with this.
            # unable to repro locally on osx.
            opam_install_command.append("--assume-depexts")

        opam_install_command += DEPENDENCIES

        self.run(
            opam_install_command,
            add_environment_variables=environment_variables
        )
        return environment_variables

    def full_setup(
        self,
        pyre_directory: Path,
        *,
        run_tests: bool = False,
        run_clean: bool = False,
        build_type_override: Optional[BuildType] = None,
        rust_path: Optional[Path] = None
    ) -> None:
        """End-to-end build: select switch + deps, regenerate the dune file,
        then run the dev or release make targets (and tests if requested)."""
        opam_environment_variables: Mapping[
            str, str
        ] = self.set_opam_switch_and_install_dependencies(rust_path=rust_path)

        def run_in_opam_environment(command: List[str]) -> None:
            # All build commands run from source/ with the opam env active.
            self.run(
                command,
                current_working_directory=pyre_directory / "source",
                add_environment_variables=opam_environment_variables,
            )

        self.produce_dune_file(pyre_directory, build_type_override)
        if run_clean:
            # Note: we do not run `make clean` because we want the result of the
            # explicit `produce_dune_file` to remain.
            # Dune 3.7 runs into `rmdir` failure when cleaning the `_build` directory
            # for some reason. Manually clean the dir to work around the issue.
            run_in_opam_environment(["rm", "-rf", "_build"])
        if self.release:
            LOG.info("Running a release build. This may take a while.")
            run_in_opam_environment(["make", "release"])
            if run_tests:
                run_in_opam_environment(["make", "release_test"])
        else:
            run_in_opam_environment(["make", "dev"])
            if run_tests:
                run_in_opam_environment(["make", "test"])

    def run(
        self,
        command: List[str],
        current_working_directory: Optional[Path] = None,
        add_environment_variables: Optional[Mapping[str, str]] = None,
    ) -> str:
        """Run *command*, returning its stdout with one trailing newline
        stripped; logs and re-raises on a non-zero exit."""
        if add_environment_variables:
            environment_variables = {
                **self.environment_variables,
                **add_environment_variables,
            }
        else:
            environment_variables = self.environment_variables
        LOG.info(command)
        try:
            output = subprocess.check_output(
                command,
                universal_newlines=True,
                cwd=current_working_directory,
                env=environment_variables,
            )
        except CalledProcessError as called_process_error:
            # NOTE(review): check_output does not capture stderr here, so
            # .stderr is typically None in this message — confirm intent.
            LOG.info(f'Command: {command} returned non zero exit code.\n\
stdout: {called_process_error.stdout}\n\
stderr: {called_process_error.stderr}')
            raise called_process_error
        if output.endswith("\n"):
            return output[:-1]
        else:
            return output
def _make_opam_root(local: bool, temporary_root: bool, default: Optional[Path]) -> Path:
home = Path.home()
home_opam = home / ".opam"
if local:
if not home_opam.is_dir():
local_opam = home / "local" / "opam"
local_opam.parent.mkdir(parents=True, exist_ok=True)
local_opam.symlink_to(home_opam, target_is_directory=True)
return home_opam
if temporary_root:
return Path(mkdtemp())
return default or home_opam
def setup(runner_type: Type[Setup]) -> None:
    """CLI entry point: parse arguments and run the requested flavor of
    bootstrap/build using *runner_type* (a Setup subclass or Setup itself)."""
    logging.basicConfig(
        level=logging.INFO, format="[%(asctime)s] [%(levelname)s] %(message)s"
    )

    parser = argparse.ArgumentParser(description="Set up Pyre.")

    parser.add_argument("--pyre-directory", type=Path)

    parser.add_argument("--local", action="store_true")
    parser.add_argument("--temporary_root", action="store_true")
    parser.add_argument("--opam-root", type=Path)
    parser.add_argument("--configure", action="store_true")
    parser.add_argument("--environment-only", action="store_true")
    parser.add_argument("--release", action="store_true")
    parser.add_argument("--build-type", type=BuildType)
    parser.add_argument("--no-tests", action="store_true")
    parser.add_argument("--rust-path", type=Path)

    parsed = parser.parse_args()

    # Default the repo root to the parent of the scripts/ directory.
    pyre_directory = parsed.pyre_directory
    if not pyre_directory:
        pyre_directory = Path(__file__).parent.parent.absolute()

    opam_root = _make_opam_root(parsed.local, parsed.temporary_root, parsed.opam_root)

    runner = runner_type(opam_root=opam_root, release=parsed.release)
    if parsed.configure:
        # Only regenerate the dune file.
        runner.produce_dune_file(pyre_directory, parsed.build_type)
    elif parsed.environment_only:
        # Build the opam environment but skip the actual compile.
        runner.produce_dune_file(pyre_directory, parsed.build_type)
        runner.initialize_opam_switch()
        LOG.info("Environment built successfully, stopping here as requested.")
    else:
        # Full build, bootstrapping the switch first if needed.
        if not runner.already_initialized():
            runner.initialize_opam_switch()
        runner.full_setup(
            pyre_directory,
            run_tests=not parsed.no_tests,
            build_type_override=parsed.build_type,
            rust_path=parsed.rust_path
        )
# Script entry point: run with the default open-source Setup runner.
if __name__ == "__main__":
    setup(Setup)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
9f912bb682f7b967a2fb11f58daaf4b50d5476bc | d585ba3d0891e8f4d091091007c34a61896b3f26 | /TestPackages/MyFirstButton.py | 4c0ed627baefd85a96cf6dd0efa090ab6c80f8f8 | [] | no_license | Bjoneskc/UMKC-Python1 | a1b4c5194bad93d3636190dff02995e3f20f1053 | e1686d1c5129c18262c2b272764477b5be00944f | refs/heads/master | 2016-09-06T08:33:23.721535 | 2014-03-01T04:44:28 | 2014-03-01T04:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | __author__ = 'bjone'
import sys
from tkinter import *
# Build a single Tk button that exits the process when clicked, then enter
# the Tk event loop.
widget = Button(None, text='Click Me', command=sys.exit)
widget.pack()

widget.mainloop()
"bjoneskc01@gmail.com"
] | bjoneskc01@gmail.com |
7476db67d7a9a1e950e4e9ac795f4d9a8fc1af7d | bdf647d2f626578aa447258b7529f4acfdb2cfba | /tencentcloud/tke/v20180525/models.py | 26b20dc4ac138734595287fda29734009f79bad4 | [
"Apache-2.0"
] | permissive | psixdp/tencentcloud-sdk-python | 2c93528b9d7a5cec1fa38c3dd140a277abf8d26e | 7e0ec01ebb50cbfb92c60ed1f29a59b77199ccea | refs/heads/master | 2020-06-30T22:12:23.331433 | 2019-08-02T03:16:09 | 2019-08-02T03:16:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,176 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AddExistedInstancesRequest(AbstractModel):
    """AddExistedInstances request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param InstanceIds: List of instance IDs to add.
        :type InstanceIds: list of str
        :param InstanceAdvancedSettings: Additional parameter settings for the instances.
        :type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
        :param EnhancedService: Enhanced services. Specifies whether to enable Cloud Security, Cloud Monitor, etc.; both are enabled by default when omitted.
        :type EnhancedService: :class:`tencentcloud.tke.v20180525.models.EnhancedService`
        :param LoginSettings: Node login settings (currently only Password or a single entry in KeyIds is supported).
        :type LoginSettings: :class:`tencentcloud.tke.v20180525.models.LoginSettings`
        :param SecurityGroupIds: Security groups the instances belong to, as returned in the sgId field of DescribeSecurityGroups. The default security group is bound when omitted (currently only a single sgId is supported).
        :type SecurityGroupIds: list of str
        """
        self.ClusterId = None
        self.InstanceIds = None
        self.InstanceAdvancedSettings = None
        self.EnhancedService = None
        self.LoginSettings = None
        self.SecurityGroupIds = None

    def _deserialize(self, params):
        # Nested structures are materialized into their model classes.
        self.ClusterId = params.get("ClusterId")
        self.InstanceIds = params.get("InstanceIds")
        if params.get("InstanceAdvancedSettings") is not None:
            self.InstanceAdvancedSettings = InstanceAdvancedSettings()
            self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
        if params.get("EnhancedService") is not None:
            self.EnhancedService = EnhancedService()
            self.EnhancedService._deserialize(params.get("EnhancedService"))
        if params.get("LoginSettings") is not None:
            self.LoginSettings = LoginSettings()
            self.LoginSettings._deserialize(params.get("LoginSettings"))
        self.SecurityGroupIds = params.get("SecurityGroupIds")
class AddExistedInstancesResponse(AbstractModel):
    """AddExistedInstances response structure.

    """

    def __init__(self):
        """
        :param FailedInstanceIds: IDs of nodes that failed to be added.
        :type FailedInstanceIds: list of str
        :param SuccInstanceIds: IDs of nodes that were added successfully.
        :type SuccInstanceIds: list of str
        :param TimeoutInstanceIds: IDs of nodes that timed out without a result (may have succeeded or failed).
        :type TimeoutInstanceIds: list of str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.FailedInstanceIds = None
        self.SuccInstanceIds = None
        self.TimeoutInstanceIds = None
        self.RequestId = None

    def _deserialize(self, params):
        self.FailedInstanceIds = params.get("FailedInstanceIds")
        self.SuccInstanceIds = params.get("SuccInstanceIds")
        self.TimeoutInstanceIds = params.get("TimeoutInstanceIds")
        self.RequestId = params.get("RequestId")
class Cluster(AbstractModel):
    """Cluster information structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param ClusterName: Cluster name.
        :type ClusterName: str
        :param ClusterDescription: Cluster description.
        :type ClusterDescription: str
        :param ClusterVersion: Cluster version (defaults to 1.10.5).
        :type ClusterVersion: str
        :param ClusterOs: Cluster OS: centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 (default ubuntu16.04.1 LTSx86_64).
        :type ClusterOs: str
        :param ClusterType: Cluster type: managed (MANAGED_CLUSTER) or self-deployed (INDEPENDENT_CLUSTER).
        :type ClusterType: str
        :param ClusterNetworkSettings: Cluster network settings.
        :type ClusterNetworkSettings: :class:`tencentcloud.tke.v20180525.models.ClusterNetworkSettings`
        :param ClusterNodeNum: Current number of nodes in the cluster.
        :type ClusterNodeNum: int
        :param ProjectId: ID of the project the cluster belongs to.
        :type ProjectId: int
        """
        self.ClusterId = None
        self.ClusterName = None
        self.ClusterDescription = None
        self.ClusterVersion = None
        self.ClusterOs = None
        self.ClusterType = None
        self.ClusterNetworkSettings = None
        self.ClusterNodeNum = None
        self.ProjectId = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.ClusterName = params.get("ClusterName")
        self.ClusterDescription = params.get("ClusterDescription")
        self.ClusterVersion = params.get("ClusterVersion")
        self.ClusterOs = params.get("ClusterOs")
        self.ClusterType = params.get("ClusterType")
        if params.get("ClusterNetworkSettings") is not None:
            self.ClusterNetworkSettings = ClusterNetworkSettings()
            self.ClusterNetworkSettings._deserialize(params.get("ClusterNetworkSettings"))
        self.ClusterNodeNum = params.get("ClusterNodeNum")
        self.ProjectId = params.get("ProjectId")
class ClusterAdvancedSettings(AbstractModel):
    """Advanced cluster configuration.

    """

    def __init__(self):
        """
        :param IPVS: Whether to enable IPVS.
        :type IPVS: bool
        :param AsEnabled: Whether to enable auto scaling of cluster nodes.
        :type AsEnabled: bool
        :param ContainerRuntime: Container runtime used by the cluster, either "docker" or "containerd" (default "docker").
        :type ContainerRuntime: str
        """
        self.IPVS = None
        self.AsEnabled = None
        self.ContainerRuntime = None

    def _deserialize(self, params):
        self.IPVS = params.get("IPVS")
        self.AsEnabled = params.get("AsEnabled")
        self.ContainerRuntime = params.get("ContainerRuntime")
class ClusterBasicSettings(AbstractModel):
    """Basic cluster configuration.

    """

    def __init__(self):
        """
        :param ClusterOs: Cluster OS: centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 (default ubuntu16.04.1 LTSx86_64).
        :type ClusterOs: str
        :param ClusterVersion: Cluster version (defaults to 1.10.5).
        :type ClusterVersion: str
        :param ClusterName: Cluster name.
        :type ClusterName: str
        :param ClusterDescription: Cluster description.
        :type ClusterDescription: str
        :param VpcId: VPC ID, e.g. vpc-xxx. Required when creating an empty managed cluster.
        :type VpcId: str
        :param ProjectId: Project ID for new resources created in the cluster.
        :type ProjectId: int
        """
        self.ClusterOs = None
        self.ClusterVersion = None
        self.ClusterName = None
        self.ClusterDescription = None
        self.VpcId = None
        self.ProjectId = None

    def _deserialize(self, params):
        self.ClusterOs = params.get("ClusterOs")
        self.ClusterVersion = params.get("ClusterVersion")
        self.ClusterName = params.get("ClusterName")
        self.ClusterDescription = params.get("ClusterDescription")
        self.VpcId = params.get("VpcId")
        self.ProjectId = params.get("ProjectId")
class ClusterCIDRSettings(AbstractModel):
    """Cluster container-network parameters.

    """

    def __init__(self):
        """
        :param ClusterCIDR: CIDR used to assign container and service IPs; must not conflict with the VPC CIDR or with the CIDRs of other clusters in the same VPC.
        :type ClusterCIDR: str
        :param IgnoreClusterCIDRConflict: Whether to ignore ClusterCIDR conflict errors (defaults to not ignoring).
        :type IgnoreClusterCIDRConflict: bool
        :param MaxNodePodNum: Maximum number of Pods per node in the cluster.
        :type MaxNodePodNum: int
        :param MaxClusterServiceNum: Maximum number of services in the cluster.
        :type MaxClusterServiceNum: int
        """
        self.ClusterCIDR = None
        self.IgnoreClusterCIDRConflict = None
        self.MaxNodePodNum = None
        self.MaxClusterServiceNum = None

    def _deserialize(self, params):
        self.ClusterCIDR = params.get("ClusterCIDR")
        self.IgnoreClusterCIDRConflict = params.get("IgnoreClusterCIDRConflict")
        self.MaxNodePodNum = params.get("MaxNodePodNum")
        self.MaxClusterServiceNum = params.get("MaxClusterServiceNum")
class ClusterNetworkSettings(AbstractModel):
    """Cluster network-related parameters.

    """

    def __init__(self):
        """
        :param ClusterCIDR: CIDR used to assign container and service IPs; must not conflict with the VPC CIDR or with the CIDRs of other clusters in the same VPC.
        :type ClusterCIDR: str
        :param IgnoreClusterCIDRConflict: Whether to ignore ClusterCIDR conflict errors (defaults to not ignoring).
        :type IgnoreClusterCIDRConflict: bool
        :param MaxNodePodNum: Maximum number of Pods per node in the cluster (default 256).
        :type MaxNodePodNum: int
        :param MaxClusterServiceNum: Maximum number of services in the cluster (default 256).
        :type MaxClusterServiceNum: int
        :param Ipvs: Whether IPVS is enabled (disabled by default).
        :type Ipvs: bool
        :param VpcId: VPC ID of the cluster (required when creating an empty cluster; otherwise automatically set to match the cluster's nodes).
        :type VpcId: str
        :param Cni: Whether the CNI network plugin is enabled (enabled by default).
        :type Cni: bool
        """
        self.ClusterCIDR = None
        self.IgnoreClusterCIDRConflict = None
        self.MaxNodePodNum = None
        self.MaxClusterServiceNum = None
        self.Ipvs = None
        self.VpcId = None
        self.Cni = None

    def _deserialize(self, params):
        self.ClusterCIDR = params.get("ClusterCIDR")
        self.IgnoreClusterCIDRConflict = params.get("IgnoreClusterCIDRConflict")
        self.MaxNodePodNum = params.get("MaxNodePodNum")
        self.MaxClusterServiceNum = params.get("MaxClusterServiceNum")
        self.Ipvs = params.get("Ipvs")
        self.VpcId = params.get("VpcId")
        self.Cni = params.get("Cni")
class CreateClusterInstancesRequest(AbstractModel):
    """CreateClusterInstances request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID; use the clusterId field returned by the cluster-list API.
        :type ClusterId: str
        :param RunInstancePara: Pass-through CVM creation parameters as a JSON string; see the CVM RunInstances API (https://cloud.tencent.com/document/product/213/15730).
        :type RunInstancePara: str
        :param InstanceAdvancedSettings: Additional parameter settings for the instances.
        :type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
        """
        self.ClusterId = None
        self.RunInstancePara = None
        self.InstanceAdvancedSettings = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.RunInstancePara = params.get("RunInstancePara")
        if params.get("InstanceAdvancedSettings") is not None:
            self.InstanceAdvancedSettings = InstanceAdvancedSettings()
            self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
class CreateClusterInstancesResponse(AbstractModel):
    """CreateClusterInstances response structure.

    """

    def __init__(self):
        """
        :param InstanceIdSet: IDs of the created node instances.
        :type InstanceIdSet: list of str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.InstanceIdSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.InstanceIdSet = params.get("InstanceIdSet")
        self.RequestId = params.get("RequestId")
class CreateClusterRequest(AbstractModel):
    """CreateCluster request structure.

    """

    def __init__(self):
        """
        :param ClusterCIDRSettings: Container-network configuration of the cluster.
        :type ClusterCIDRSettings: :class:`tencentcloud.tke.v20180525.models.ClusterCIDRSettings`
        :param ClusterType: Cluster type: managed (MANAGED_CLUSTER) or self-deployed (INDEPENDENT_CLUSTER).
        :type ClusterType: str
        :param RunInstancesForNode: Pass-through CVM creation parameters as JSON strings; see the CVM RunInstances API (https://cloud.tencent.com/document/product/213/15730).
        :type RunInstancesForNode: list of RunInstancesForNode
        :param ClusterBasicSettings: Basic configuration of the cluster.
        :type ClusterBasicSettings: :class:`tencentcloud.tke.v20180525.models.ClusterBasicSettings`
        :param ClusterAdvancedSettings: Advanced configuration of the cluster.
        :type ClusterAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.ClusterAdvancedSettings`
        :param InstanceAdvancedSettings: Advanced node configuration.
        :type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
        :param ExistedInstancesForNode: Configuration for existing instances to add.
        :type ExistedInstancesForNode: list of ExistedInstancesForNode
        """
        self.ClusterCIDRSettings = None
        self.ClusterType = None
        self.RunInstancesForNode = None
        self.ClusterBasicSettings = None
        self.ClusterAdvancedSettings = None
        self.InstanceAdvancedSettings = None
        self.ExistedInstancesForNode = None

    def _deserialize(self, params):
        # Nested structures and lists are materialized into their model classes.
        if params.get("ClusterCIDRSettings") is not None:
            self.ClusterCIDRSettings = ClusterCIDRSettings()
            self.ClusterCIDRSettings._deserialize(params.get("ClusterCIDRSettings"))
        self.ClusterType = params.get("ClusterType")
        if params.get("RunInstancesForNode") is not None:
            self.RunInstancesForNode = []
            for item in params.get("RunInstancesForNode"):
                obj = RunInstancesForNode()
                obj._deserialize(item)
                self.RunInstancesForNode.append(obj)
        if params.get("ClusterBasicSettings") is not None:
            self.ClusterBasicSettings = ClusterBasicSettings()
            self.ClusterBasicSettings._deserialize(params.get("ClusterBasicSettings"))
        if params.get("ClusterAdvancedSettings") is not None:
            self.ClusterAdvancedSettings = ClusterAdvancedSettings()
            self.ClusterAdvancedSettings._deserialize(params.get("ClusterAdvancedSettings"))
        if params.get("InstanceAdvancedSettings") is not None:
            self.InstanceAdvancedSettings = InstanceAdvancedSettings()
            self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
        if params.get("ExistedInstancesForNode") is not None:
            self.ExistedInstancesForNode = []
            for item in params.get("ExistedInstancesForNode"):
                obj = ExistedInstancesForNode()
                obj._deserialize(item)
                self.ExistedInstancesForNode.append(obj)
class CreateClusterResponse(AbstractModel):
    """CreateCluster response structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.ClusterId = None
        self.RequestId = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.RequestId = params.get("RequestId")
class DeleteClusterInstancesRequest(AbstractModel):
    """DeleteClusterInstances request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param InstanceIds: List of host InstanceIds to delete.
        :type InstanceIds: list of str
        :param InstanceDeleteMode: Deletion policy for the instances: terminate (destroy the instance; pay-as-you-go CVMs only) or retain (only remove from the cluster, keep the instance).
        :type InstanceDeleteMode: str
        """
        self.ClusterId = None
        self.InstanceIds = None
        self.InstanceDeleteMode = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.InstanceIds = params.get("InstanceIds")
        self.InstanceDeleteMode = params.get("InstanceDeleteMode")
class DeleteClusterInstancesResponse(AbstractModel):
    """DeleteClusterInstances response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DeleteClusterRequest(AbstractModel):
    """DeleteCluster request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param InstanceDeleteMode: Deletion policy for the cluster's instances: terminate (destroy the instance; pay-as-you-go CVMs only) or retain (only remove from the cluster, keep the instance).
        :type InstanceDeleteMode: str
        """
        self.ClusterId = None
        self.InstanceDeleteMode = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.InstanceDeleteMode = params.get("InstanceDeleteMode")
class DeleteClusterResponse(AbstractModel):
    """DeleteCluster response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
class DescribeClusterInstancesRequest(AbstractModel):
    """DescribeClusterInstances request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID.
        :type ClusterId: str
        :param Offset: Pagination offset (default 0).
        :type Offset: int
        :param Limit: Maximum number of entries to return (default 20).
        :type Limit: int
        :param InstanceIds: IDs of the node instances to query (empty by default, meaning all nodes in the cluster).
        :type InstanceIds: list of str
        """
        self.ClusterId = None
        self.Offset = None
        self.Limit = None
        self.InstanceIds = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.InstanceIds = params.get("InstanceIds")
class DescribeClusterInstancesResponse(AbstractModel):
    """DescribeClusterInstances response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of instances in the cluster.
        :type TotalCount: int
        :param InstanceSet: List of instances in the cluster.
        :type InstanceSet: list of Instance
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.InstanceSet = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        if params.get("InstanceSet") is not None:
            self.InstanceSet = []
            for item in params.get("InstanceSet"):
                obj = Instance()
                obj._deserialize(item)
                self.InstanceSet.append(obj)
        self.RequestId = params.get("RequestId")
class DescribeClusterSecurityRequest(AbstractModel):
    """DescribeClusterSecurity request structure.

    """

    def __init__(self):
        """
        :param ClusterId: Cluster ID; use the clusterId field returned by the cluster-list API.
        :type ClusterId: str
        """
        self.ClusterId = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
class DescribeClusterSecurityResponse(AbstractModel):
    """Response payload of the DescribeClusterSecurity API call."""

    def __init__(self):
        # UserName (str): account name for the cluster.
        # Password (str): access password for the cluster.
        # CertificationAuthority (str): CA certificate for cluster access.
        # ClusterExternalEndpoint (str): address used to access the cluster.
        # Domain (str): domain name used to access the cluster.
        # PgwEndpoint (str): cluster endpoint address.
        # SecurityPolicy (list of str): access policy group of the cluster.
        # RequestId (str): unique request ID, returned with every call.
        for field in ("UserName", "Password", "CertificationAuthority",
                      "ClusterExternalEndpoint", "Domain", "PgwEndpoint",
                      "SecurityPolicy", "RequestId"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Every field is a plain value copied straight from the dict.
        for field in ("UserName", "Password", "CertificationAuthority",
                      "ClusterExternalEndpoint", "Domain", "PgwEndpoint",
                      "SecurityPolicy", "RequestId"):
            setattr(self, field, params.get(field))
class DescribeClustersRequest(AbstractModel):
    """Request payload of the DescribeClusters API call."""

    def __init__(self):
        # ClusterIds (list of str): cluster IDs to query; empty means every
        # cluster under the account.
        # Offset (int): pagination offset, defaults to 0.
        # Limit (int): maximum number of entries returned, defaults to 20.
        # Filters (list of Filter): filter conditions; currently only a
        # single ClusterName condition is supported.
        self.ClusterIds = None
        self.Offset = None
        self.Limit = None
        self.Filters = None

    def _deserialize(self, params):
        self.ClusterIds = params.get("ClusterIds")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        raw_filters = params.get("Filters")
        if raw_filters is not None:
            # Hydrate each raw dict into a Filter model object.
            self.Filters = []
            for entry in raw_filters:
                flt = Filter()
                flt._deserialize(entry)
                self.Filters.append(flt)
class DescribeClustersResponse(AbstractModel):
    """Response payload of the DescribeClusters API call."""

    def __init__(self):
        # TotalCount (int): total number of clusters.
        # Clusters (list of Cluster): cluster details.
        # RequestId (str): unique request ID, returned with every call.
        self.TotalCount = None
        self.Clusters = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_clusters = params.get("Clusters")
        if raw_clusters is not None:
            # Hydrate each raw dict into a Cluster model object.
            self.Clusters = []
            for entry in raw_clusters:
                cluster = Cluster()
                cluster._deserialize(entry)
                self.Clusters.append(cluster)
        self.RequestId = params.get("RequestId")
class DescribeExistedInstancesRequest(AbstractModel):
    """Request payload of the DescribeExistedInstances API call."""

    def __init__(self):
        # ClusterId (str): cluster ID; its VPC ID is added to the query
        # filters, and node state is compared against every cluster in the
        # region.  Mutually exclusive with InstanceIds.
        # InstanceIds (list of str): query by instance IDs (ins-xxxxxxxx),
        # at most 100 per request.  Mutually exclusive with Filters.
        # Filters: filter conditions (see the CVM DescribeInstances API);
        # if a "vpc-id" filter is given it must match the cluster's VPC ID.
        # NOTE(review): deserialized below as a single Filter object,
        # unlike the list-of-Filter handling elsewhere in this module --
        # confirm against the wire format.
        # VagueIpAddress (str): filter by instance IP (private or public).
        # VagueInstanceName (str): filter by instance name.
        # Offset (int): pagination offset, defaults to 0.
        # Limit (int): number of entries returned, default 20, max 100.
        self.ClusterId = None
        self.InstanceIds = None
        self.Filters = None
        self.VagueIpAddress = None
        self.VagueInstanceName = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        for field in ("ClusterId", "InstanceIds", "VagueIpAddress",
                      "VagueInstanceName", "Offset", "Limit"):
            setattr(self, field, params.get(field))
        if params.get("Filters") is not None:
            self.Filters = Filter()
            self.Filters._deserialize(params.get("Filters"))
class DescribeExistedInstancesResponse(AbstractModel):
    """Response payload of the DescribeExistedInstances API call."""

    def __init__(self):
        # ExistedInstanceSet (list of ExistedInstance): existing instances;
        # may be None when the backend returns no value.
        # TotalCount (int): number of instances matching the query.
        # RequestId (str): unique request ID, returned with every call.
        self.ExistedInstanceSet = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_set = params.get("ExistedInstanceSet")
        if raw_set is not None:
            # Hydrate each raw dict into an ExistedInstance model object.
            self.ExistedInstanceSet = []
            for entry in raw_set:
                inst = ExistedInstance()
                inst._deserialize(entry)
                self.ExistedInstanceSet.append(inst)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class EnhancedService(AbstractModel):
    """Enhanced-service switches for an instance (Cloud Security and Cloud
    Monitor agents); both services default to enabled when unspecified."""

    def __init__(self):
        # SecurityService (RunSecurityServiceEnabled): Cloud Security switch.
        # MonitorService (RunMonitorServiceEnabled): Cloud Monitor switch.
        self.SecurityService = None
        self.MonitorService = None

    def _deserialize(self, params):
        raw_security = params.get("SecurityService")
        if raw_security is not None:
            self.SecurityService = RunSecurityServiceEnabled()
            self.SecurityService._deserialize(raw_security)
        raw_monitor = params.get("MonitorService")
        if raw_monitor is not None:
            self.MonitorService = RunMonitorServiceEnabled()
            self.MonitorService._deserialize(raw_monitor)
class ExistedInstance(AbstractModel):
    """Description of an instance that already exists.

    Any attribute may be None when the backend has no value for it.
    """

    def __init__(self):
        # Usable (bool): whether the instance may join a cluster.
        # UnusableReason (str): why the instance cannot join.
        # AlreadyInCluster (str): ID of the cluster the instance belongs to.
        # InstanceId (str): instance ID (ins-xxxxxxxx).
        # InstanceName (str): instance name.
        # PrivateIpAddresses (list of str): private IPs of the primary NIC.
        # PublicIpAddresses (list of str): public IPs of the primary NIC.
        # CreatedTime (str): creation time, ISO 8601 in UTC
        # (YYYY-MM-DDThh:mm:ssZ).
        # InstanceChargeType (str): billing mode -- PREPAID (subscription),
        # POSTPAID_BY_HOUR (pay-as-you-go), or CDHPAID (only the CDH host
        # itself is billed, not the instances on it).
        # CPU (int): number of CPU cores.
        # Memory (int): memory size in GB.
        # OsName (str): operating-system name.
        # InstanceType (str): instance model.
        self.Usable = None
        self.UnusableReason = None
        self.AlreadyInCluster = None
        self.InstanceId = None
        self.InstanceName = None
        self.PrivateIpAddresses = None
        self.PublicIpAddresses = None
        self.CreatedTime = None
        self.InstanceChargeType = None
        self.CPU = None
        self.Memory = None
        self.OsName = None
        self.InstanceType = None

    def _deserialize(self, params):
        # Every field is a plain value copied straight from the dict;
        # missing keys leave the attribute as None.
        for field in ("Usable", "UnusableReason", "AlreadyInCluster",
                      "InstanceId", "InstanceName", "PrivateIpAddresses",
                      "PublicIpAddresses", "CreatedTime",
                      "InstanceChargeType", "CPU", "Memory", "OsName",
                      "InstanceType"):
            setattr(self, field, params.get(field))
class ExistedInstancesForNode(AbstractModel):
    """Configuration of already-existing instances for a given node role."""

    def __init__(self):
        # NodeRole (str): node role, MASTER_ETCD or WORKER; MASTER_ETCD is
        # only required when creating an INDEPENDENT_CLUSTER.
        # ExistedInstancesPara (ExistedInstancesPara): reinstall parameters
        # for the existing instances.
        self.NodeRole = None
        self.ExistedInstancesPara = None

    def _deserialize(self, params):
        self.NodeRole = params.get("NodeRole")
        raw_para = params.get("ExistedInstancesPara")
        if raw_para is not None:
            self.ExistedInstancesPara = ExistedInstancesPara()
            self.ExistedInstancesPara._deserialize(raw_para)
class ExistedInstancesPara(AbstractModel):
    """Reinstall parameters for already-existing instances."""

    def __init__(self):
        # InstanceIds (list of str): IDs of the instances to reinstall.
        # InstanceAdvancedSettings: extra per-instance settings.
        # EnhancedService: Cloud Security / Cloud Monitor switches; both
        # default to enabled when unspecified.
        # LoginSettings: node login info (currently Password or a single
        # KeyIds entry).
        # SecurityGroupIds (list of str): security group of the instances
        # (sgId from DescribeSecurityGroups); the default group is bound
        # when unspecified.  Currently only a single sgId is supported.
        self.InstanceIds = None
        self.InstanceAdvancedSettings = None
        self.EnhancedService = None
        self.LoginSettings = None
        self.SecurityGroupIds = None

    def _deserialize(self, params):
        self.InstanceIds = params.get("InstanceIds")
        # Hydrate each nested sub-model only when present in the dict.
        nested = (
            ("InstanceAdvancedSettings", InstanceAdvancedSettings),
            ("EnhancedService", EnhancedService),
            ("LoginSettings", LoginSettings),
        )
        for field, model_cls in nested:
            raw = params.get(field)
            if raw is not None:
                obj = model_cls()
                obj._deserialize(raw)
                setattr(self, field, obj)
        self.SecurityGroupIds = params.get("SecurityGroupIds")
class Filter(AbstractModel):
    """A single query filter.

    Multiple filters combine with logical AND; multiple values inside one
    filter combine with logical OR.
    """

    def __init__(self):
        # Name (str): attribute name to filter on.
        # Values (list of str): accepted values for the attribute.
        self.Name = None
        self.Values = None

    def _deserialize(self, params):
        for field in ("Name", "Values"):
            setattr(self, field, params.get(field))
class Instance(AbstractModel):
    """Information about one instance belonging to a cluster."""

    def __init__(self):
        # InstanceId (str): instance ID.
        # InstanceRole (str): node role -- MASTER, WORKER, ETCD,
        # MASTER_ETCD or ALL; defaults to WORKER.
        # FailedReason (str): why the instance is abnormal (or still
        # initializing).
        # InstanceState (str): instance state -- running, initializing,
        # or failed.
        for field in ("InstanceId", "InstanceRole", "FailedReason",
                      "InstanceState"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # All fields are plain values; missing keys stay None.
        for field in ("InstanceId", "InstanceRole", "FailedReason",
                      "InstanceState"):
            setattr(self, field, params.get(field))
class InstanceAdvancedSettings(AbstractModel):
    """Advanced per-node settings for a Kubernetes cluster node."""

    def __init__(self):
        # MountTarget (str): data-disk mount point; no disk is mounted by
        # default.  Disks already formatted as ext3/ext4/xfs are mounted
        # as-is, anything else is reformatted to ext4 first (back up your
        # data!).  Ignored for hosts with zero or multiple data disks.
        # DockerGraphPath (str): value passed to dockerd --graph,
        # defaults to /var/lib/docker.
        # UserScript (str): base64-encoded user script executed after the
        # k8s components start; must be reentrant/retry-safe.  The script
        # and its logs live under /data/ccs_userscript/ on the node.  To
        # keep the node unschedulable until the script finishes, combine
        # with Unschedulable and end the script with
        # `kubectl uncordon <node> --kubeconfig=/root/.kube/config`.
        # Unschedulable (int): 0 (default) lets the node be scheduled;
        # any non-zero value keeps it out of scheduling until uncordoned.
        self.MountTarget = None
        self.DockerGraphPath = None
        self.UserScript = None
        self.Unschedulable = None

    def _deserialize(self, params):
        for field in ("MountTarget", "DockerGraphPath", "UserScript",
                      "Unschedulable"):
            setattr(self, field, params.get(field))
class LoginSettings(AbstractModel):
    """Instance login configuration.

    Any attribute may be None when the backend has no value for it.
    """

    def __init__(self):
        # Password (str): login password; complexity rules differ between
        # Linux (8-16 chars, at least two character classes) and Windows
        # (12-16 chars, at least three).  When omitted, a random password
        # is generated and delivered via in-console message.
        # KeyIds (list of str): key-pair IDs (see DescribeKeyPairs);
        # mutually exclusive with Password, unsupported on Windows, and
        # currently limited to a single key at purchase time.
        # KeepImageLogin (str): "TRUE" keeps the image's original login
        # settings (custom/shared/imported images only); mutually
        # exclusive with Password and KeyIds.  Defaults to "FALSE".
        self.Password = None
        self.KeyIds = None
        self.KeepImageLogin = None

    def _deserialize(self, params):
        for field in ("Password", "KeyIds", "KeepImageLogin"):
            setattr(self, field, params.get(field))
class RunInstancesForNode(AbstractModel):
    """Node-creation parameters for a given node role."""

    def __init__(self):
        # NodeRole (str): MASTER_ETCD or WORKER; MASTER_ETCD is only
        # required when creating an INDEPENDENT_CLUSTER.
        # RunInstancesPara (list of str): JSON strings passed through to
        # the CVM RunInstances API (everything except the common
        # parameters); ImageId is replaced with the image matching the
        # TKE cluster OS.
        self.NodeRole = None
        self.RunInstancesPara = None

    def _deserialize(self, params):
        self.RunInstancesPara = params.get("RunInstancesPara")
        self.NodeRole = params.get("NodeRole")
class RunMonitorServiceEnabled(AbstractModel):
    """Switch for the Cloud Monitor service."""

    def __init__(self):
        # Enabled (bool): whether Cloud Monitor is enabled; defaults to
        # True when unspecified.
        self.Enabled = None

    def _deserialize(self, params):
        # A missing key leaves the attribute as None.
        self.Enabled = params.get("Enabled")
class RunSecurityServiceEnabled(AbstractModel):
    """Switch for the Cloud Security service."""

    def __init__(self):
        # Enabled (bool): whether Cloud Security is enabled; defaults to
        # True when unspecified.
        self.Enabled = None

    def _deserialize(self, params):
        # A missing key leaves the attribute as None.
        self.Enabled = params.get("Enabled")
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
31fdabf14f01efb19bf3c0756328b0bf7d43f9b3 | 394bf1fc97dac38c956f8c3c83185782ac97fae0 | /test.py | 48d0e2d7e427da6daf6c3320a6ecb31ab3a9bcd8 | [] | no_license | Mithsen/VGG50_test1 | ce182c048536bc9e612b912a5b3ca7c132f509cd | 90f2f1f2726e05bbccdfd13758db45ee08f001d8 | refs/heads/master | 2020-03-08T09:29:46.997407 | 2018-04-04T11:08:21 | 2018-04-04T11:08:21 | 128,048,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | import os
import sys
import argparse
import numpy as np
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.preprocessing import image
from keras.models import load_model
#from keras.applications.inception_v3 import preprocess_input
from keras.applications.vgg16 import preprocess_input

# Working directory; `os` is imported at the top of the file.
PATH = os.getcwd()

# One class label per line, stripped of trailing whitespace.
label_lines = [line.rstrip() for line
               in tf.gfile.GFile(PATH+"/lbl.txt")]

# Trained VGG16 classifier saved in HDF5 format.
model=load_model('model.hdf5')
"""Run model prediction on image
   Args:
     model: keras model
     img: PIL format image
     target_size: (w,h) tuple
   Returns:
     list of predicted labels and their probabilities
"""
# NOTE(review): the image is fed at its native size -- presumably it is
# already the size the model expects; the resize call below is disabled.
img = Image.open("test.jpg")
#img = img.resize(299, 299)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)        # add the batch dimension
x = preprocess_input(x)              # VGG16-style channel preprocessing
preds = model.predict(x)
# Print every class with its predicted score.
size = len(preds[0])
for i in range(size):
    print ('%s (score = %.5f)' % (label_lines[i], preds[0][i]),)
| [
"mithsendesilva@gmail.com"
] | mithsendesilva@gmail.com |
67260b182f95c2fc11bb0ddc543a22d804d390b2 | 56bfcc353a57855f526ccf39e49e1f63d7ee4e95 | /tensforflow_study/07_linear_regression.py | 280d9a076cc1888fd89115dd83f26653cf9b6924 | [] | no_license | thankslife/AI_Project | 57100026568e228ebbb188469f3ac945aa652a63 | 078a8f0b497316bd24aec5311dbce025c9bc5d51 | refs/heads/master | 2020-03-25T01:56:00.752905 | 2018-04-01T03:34:05 | 2018-04-01T03:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing

# Download the dataset immediately (executes eagerly -- plain Python/NumPy,
# not part of the TensorFlow graph).
housing = fetch_california_housing()
print(housing)
# Number of rows and columns of the feature matrix X.
m, n = housing.data.shape
# Prepend a bias column (x0 = 1) to every training row; np.c_ concatenates
# columns, np.ones((m, 1)) is the x0 = 1 column.
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]
# Create two TensorFlow constant nodes, X and y, holding the data and the
# labels.  These are deferred: nothing is computed yet, the graph is only
# being marked up.
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
# reshape(-1, 1) turns the row vector into a column vector (m rows, 1 col);
# y holds the ground-truth targets.
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
# Use TensorFlow matrix ops to solve for theta with the closed-form
# normal equation: theta = (X^T X)^-1 X^T y.
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
with tf.Session() as sess:
    theta_value = theta.eval()  # equivalent to sess.run(theta); runs the DAG
    print(theta_value)
| [
"945763695@qq.com"
] | 945763695@qq.com |
70b14635298c74a92c574417e14d469cd51dbda8 | e9039ce5e114dae3f02ac00ba8136a3611acf82a | /filter/migrations/0003_auto_20200626_1045.py | 3aed2c226b3f6f82ab5d2827c3df80eb17dfc275 | [] | no_license | Richard-Einsteine/Django-Bootstrap-Filter | 43667210af7ae56dc5f0e50544889ae63f816f1f | 9b6fbf650607af0b6d6a6142f81861407852c072 | refs/heads/master | 2022-11-05T05:53:29.783358 | 2020-06-27T10:31:29 | 2020-06-27T10:31:29 | 275,348,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # Generated by Django 2.2.2 on 2020-06-26 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Journal.publish_date`` to a plain (non-auto) DateTimeField."""

    dependencies = [
        ('filter', '0002_auto_20200626_0940'),
    ]

    operations = [
        migrations.AlterField(
            model_name='journal',
            name='publish_date',
            field=models.DateTimeField(),
        ),
    ]
| [
"karasirarichard250@gmail.com"
] | karasirarichard250@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.