blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1fa1bec403921087904bbafbee13cec85e2e510f | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/trapping_20200617172315.py | 650a589856ffb33697b3724367c5a84d8aea7bf7 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | def trap(arr):
# left = max(arr)
# copy = []
# for j in arr:
# copy.append(j)
# arr.remove(left)
# right = max(arr)
# total = 0
# print(copy)
# for i in range(len(copy)-1):
# total += min(left,right) - copy[i]
# print(min(left,right),"-",copy[i],"==",total)
# print (total)
res = 0
trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
faf578870d909e797d741a23827c62c64eb02853 | 7745e94a0d9776154cfd1ce0062a59723c49bf6c | /lisa/api/models.py | 8d29013279079a0ff898f20f9354093e97e35add | [] | no_license | qpwang/lisa | dc064939fb0878aa803499166306e6b3835b18c9 | 31210138cc4e378808f477a14bf68255875aa164 | refs/heads/master | 2016-09-05T17:23:36.804378 | 2013-06-02T13:53:49 | 2013-06-02T13:53:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,117 | py | # -*- coding: utf-8 -*-
import requests
from uuid import uuid4
from django.db import models
from django.utils import simplejson as json
from lisa.api.consts import *
class ThirdPartySource(models.Model):
    """Third-party OAuth platform (e.g. Sina Weibo, Renren) used for login.

    Each row stores the platform's verification API endpoint and app
    credentials; `_access_token` checks a user-supplied token against it.
    """
    class Meta:
        db_table = 'lisa_third_party_source'
        verbose_name = verbose_name_plural = '第三方API'

    name = models.CharField(max_length=64, verbose_name='名称')
    api = models.CharField(max_length=200, verbose_name='API地址')
    app_key = models.CharField(max_length=200, verbose_name='appkey')
    app_secret = models.CharField(blank=True, null=True, max_length=200, verbose_name='appsecret')
    update_time = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name

    @classmethod
    def _access_token(cls, access_token, uid, source):
        """Verify `access_token` belongs to `uid` on the named platform.

        Returns the matching ThirdPartySource id on success, None otherwise
        (including on any uid mismatch or unknown `source` value).
        """
        third_party_source = cls.objects.get(name=source)
        if source == 'sina':
            post_dict = {
                'access_token': access_token,
            }
            # BUG FIX: the query string was missing '=' — it produced
            # '...?access_token<token>' instead of '...?access_token=<token>'.
            api = '%s?access_token=%s' % (third_party_source.api, access_token)
            response = requests.post(api, post_dict)
            result = json.loads(response.content)
            if str(result.get('uid')) == uid:
                return third_party_source.id
        elif source == 'renren':
            post_dict = {
                'access_token': access_token,
                'v': '1.0',
                'format': 'json',
                'method': 'users.getInfo',
                'fields': 'uid',
            }
            api = third_party_source.api
            response = requests.post(api, post_dict)
            result = json.loads(response.content)
            # renren's users.getInfo returns a list of user dicts
            if str(result[0].get('uid')) == uid:
                return third_party_source.id
class Group(models.Model):
    """A group users can follow — either a school or a discussion topic."""
    class Meta:
        db_table = 'lisa_group'
        verbose_name = verbose_name_plural = '小组'
        ordering = ('id',)

    CHOICE_GROUP_TYPE = (
        (GROUP_CATEGORY_SCHOOL, u'学校'),
        (GROUP_CATEGORY_TOPIC, u'话题'),
    )

    name = models.CharField(max_length=128, verbose_name='名称')
    # pinyin spellings of `name` — presumably for search/lookup; TODO confirm
    pinyin = models.CharField(max_length=200, verbose_name='拼音')
    py_first = models.CharField(max_length=200, verbose_name='拼音')
    category = models.IntegerField(choices=CHOICE_GROUP_TYPE, verbose_name='类型')
    update_time = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name
class User(models.Model):
    """User account created from a third-party login.

    A user is uniquely identified by the (uid, source) pair.
    """
    class Meta:
        db_table = 'lisa_user'
        verbose_name = verbose_name_plural = '用户'
        unique_together = ('uid', 'source')

    CHOICE_USER_STATUS = (
        (USER_STATUS_NORMAL, u'正常'),
        (USER_STATUS_BAN, u'封号'),
        (USER_STATUS_FORBIDDEN, u'禁言'),
    )

    user_name = models.CharField(max_length=128, verbose_name='用户名')
    uid = models.CharField(max_length=128, verbose_name='uid')
    source = models.ForeignKey(ThirdPartySource, verbose_name='用户来源')
    token = models.CharField(max_length=200, verbose_name='token')
    status = models.IntegerField(choices=CHOICE_USER_STATUS, verbose_name='用户状态')
    create_time = models.DateTimeField(auto_now_add=True, verbose_name='注册时间')
    update_time = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.user_name

    @classmethod
    def _get_user(cls, user_name, uid, source_id):
        """Return the user for (uid, source_id), creating one on first login."""
        user = cls.objects.filter(source_id=source_id).filter(uid=uid).all()
        if not user:
            user = User()
            user.user_name = user_name
            user.uid = uid
            user.source_id = source_id
            user.token = uuid4()  # fresh random session token
            user.status = USER_STATUS_NORMAL
            user.save()
        else:
            # queryset is non-empty: take the first (unique_together makes it the only) match
            user = user[0]
        return user
class Secret(models.Model):
    """A secret (anonymous post) published into a group."""
    class Meta:
        db_table = 'lisa_secret'
        verbose_name = verbose_name_plural = '秘密'

    CHOICE_SECRET_STATUS = (
        (SECRET_STATUS_NORMAL, '正常'),
        (SECRET_STATUS_FORBIDDEN, '屏蔽'),
    )

    content = models.CharField(max_length=200, verbose_name='秘密内容')
    author = models.ForeignKey(User, verbose_name='发送人')
    group = models.ForeignKey(Group, verbose_name='小组')
    status = models.IntegerField(choices=CHOICE_SECRET_STATUS, verbose_name='状态')
    create_time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.content

    @classmethod
    def _add_secret(cls, user, group_id, content):
        """Create and persist a new secret posted by `user` into `group_id`."""
        secret = cls()
        secret.content = content
        secret.author_id = user.id
        secret.group_id = group_id
        secret.status = SECRET_STATUS_NORMAL
        secret.save()
        return secret
class Comment(models.Model):
    """A comment on a secret; may reply to another comment (`reply_to`)."""
    class Meta:
        db_table = 'lisa_comment'
        verbose_name = verbose_name_plural = '评论'

    CHOICE_COMMENT_STATUS = (
        (COMMENT_STATUS_NORMAL, '正常'),
        (COMMENT_STATUS_FORBIDDEN, '屏蔽'),
    )

    content = models.CharField(max_length=200, verbose_name='评论')
    author = models.ForeignKey(User)
    secret = models.ForeignKey(Secret, verbose_name='秘密')
    # self-referencing FK: the comment being replied to, or NULL for top-level
    reply_to = models.ForeignKey('self', null=True)
    # sequential position of this comment under its secret — caller supplies it
    floor = models.IntegerField()
    status = models.IntegerField(choices=CHOICE_COMMENT_STATUS, verbose_name='状态')
    create_time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.content

    @classmethod
    def _add_comment(cls, content, user_id, secret_id, reply_to, floor):
        """Create and persist a comment; `reply_to` may be None for top-level."""
        comment = cls()
        comment.content = content
        comment.author_id = user_id
        comment.secret_id = secret_id
        comment.reply_to_id = reply_to
        comment.floor = floor
        comment.status = COMMENT_STATUS_NORMAL
        comment.save()
        return comment
class Notice(models.Model):
    """A notification telling `receive_user` about a new comment."""
    class Meta:
        db_table = 'lisa_notice'
        verbose_name = verbose_name_plural = '通知'

    CHOICE_NOTICE_STATUS = (
        (NOTICE_STATUS_UNREAD, '未读'),
        (NOTICE_STATUS_READED, '已读'),
    )

    receive_user = models.ForeignKey(User, verbose_name='接收人')
    comment = models.ForeignKey(Comment, verbose_name='秘密')
    status = models.IntegerField(choices=CHOICE_NOTICE_STATUS, verbose_name='通知状态')
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)

    @classmethod
    def _add_notice(cls, receive_user_id, comment):
        """Create an unread notification about `comment` for the given user."""
        notice = cls()
        notice.receive_user_id = receive_user_id
        notice.comment_id = comment.id
        notice.status = NOTICE_STATUS_UNREAD
        notice.save()
class GroupUserRelation(models.Model):
    """Follow/block relation between a user and a group (one row per pair)."""
    class Meta:
        db_table = 'lisa_group_user_relation'
        verbose_name = verbose_name_plural = '关注列表'
        unique_together = ('user', 'group')

    CHOICE_GROUP_USER_RELATION = (
        (GROUP_USER_STATUS_FOLLOW, '关注'),
        (GROUP_USER_STATUS_UNFOLLOW, '未关注'),
        (GROUP_USER_STATUS_BAN, '拉黑'),
    )

    group = models.ForeignKey(Group)
    user = models.ForeignKey(User)
    status = models.IntegerField(choices=CHOICE_GROUP_USER_RELATION)
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)

    @classmethod
    def _update_relation(cls, user_id, group_id, status):
        """Upsert the relation for (user_id, group_id) to `status` and return it."""
        relation = GroupUserRelation.objects.filter(user_id=user_id, group_id=group_id).all()
        if relation:
            relation = relation[0]
            relation.status = status
            relation.save()
        else:
            relation = GroupUserRelation()
            relation.user_id = user_id
            relation.group_id = group_id
            relation.status = status
            relation.save()
        return relation
| [
"wangqingpeng@douban.com"
] | wangqingpeng@douban.com |
94b7e1be0fd25f0aa12ff893420b0737529e1b69 | 4229183b367ecc0a4b981fc0991651482b1a6b17 | /Python/Python_problems/simcross.py | 6b5d3ee7351f36fae9642a2e9bf428ecf4d445ff | [] | no_license | shiraz-30/Intro-to-Python | f0c0ff5afabcf6682aa6690e7b627b1c0f4e2e03 | 6ab108a88afd7cc074cc4ec697e0d79dc3302956 | refs/heads/main | 2023-05-03T22:20:02.315512 | 2021-05-27T15:09:41 | 2021-05-27T15:09:41 | 371,410,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | words = {}
# Crossword fill-in solver/validator.
# Reads an n x m grid, locates one horizontal slot per marked row and one
# vertical slot per marked column, then places the given words (keyed by
# length) and prints the filled grid, or "Invalid" on any mismatch.
crsswrd = []
rows, columns = {}, {}
n, m = map(int, input().split())
for r in range(n):
    line = list(input())
    crsswrd.append(line)
    cnt = []
    for index in range(m):
        char = line[index]
        # 'b' = both-direction cell, 'r' = row-slot cell — presumably slot
        # endpoint markers; assumes exactly two per marked row (TODO confirm,
        # a single marker would raise IndexError below)
        if char == "b" or char == "r":
            cnt.append(index)
    if len(cnt) != 0:
        # key = slot length; value = [row index, starting column]
        # NOTE(review): keying by length means two slots of equal length collide
        rows[cnt[1] - cnt[0] + 1] = [r, cnt[0]]
for c in range(m):
    cnt = []
    for index in range(n):
        row = crsswrd[index]
        char = row[c]
        # 'c' marks column-slot cells
        if char == "b" or char == "c":
            cnt.append(index)
    if len(cnt) != 0:
        # key = slot length; value = [starting row, column index]
        columns[cnt[1] - cnt[0] + 1] = [cnt[0], c]
for i in range(int(input())):
    word = input()
    words[len(word)] = word
# every slot length must have a word of that length, and vice versa
lengths = set(rows.keys()) | set(columns.keys())
if lengths != set(words.keys()):
    print("Invalid")
    exit()
# rebuild the grid as all-'#' and write the words in
crsswrd = [ ["#"]*m for i in range(n)]
for length in rows.keys():
    rc = rows[length]
    c = rc[1]
    row = crsswrd[rc[0]]
    word = words[length]
    i = 0
    for index in range(c, c + length):
        row[index] = word[i]
        i += 1
for length in columns.keys():
    rc = columns[length]
    r = rc[0]
    row = crsswrd[rc[0]]  # NOTE(review): dead assignment — reassigned in the loop below
    c = rc[1]
    word = words[length]
    i = 0
    for index in range(r, r + length):
        row = crsswrd[index]
        char = row[c]
        # a crossing cell already filled horizontally must agree
        if char != "#":
            if char != word[i]:
                print("Invalid")
                exit()
        row[c] = word[i]
        i += 1
for row in crsswrd:
    line = ''.join(row)
    print(line)
| [
"noreply@github.com"
] | shiraz-30.noreply@github.com |
a0caf1437c5011bb7bf01a76266e9121bf0f748d | 27906d6dec7803622d20d2d111a675c4cb10c88b | /polls/models.py | 60d46cf1f1e5ef7b080e3c4980b460dea9ec60fb | [] | no_license | Vienio99/polls-improved | 90cd6593eeb3b5b78be59b09e8402548f2102fe8 | b1aaba246769d6464b20a8f4b5febbe015a4a0f1 | refs/heads/main | 2023-02-15T16:09:43.318948 | 2021-01-12T17:30:54 | 2021-01-12T17:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | from django.db import models
from django.utils import timezone
import datetime
from django.contrib.auth.models import User
# Create your models here.
class Question(models.Model):
    """A poll question with a publication date and an author."""
    question_text = models.CharField(max_length=200)
    # NOTE(review): default='Default' is a string default on a FK to User —
    # looks suspect; verify it resolves to a valid User pk
    author = models.ForeignKey(User, default='Default', on_delete=models.CASCADE)
    pub_date = models.DateTimeField('date published', default=timezone.now)

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """True iff published within the last day and not future-dated."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote count."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        return self.choice_text
| [
"kyve96@gmail.com"
] | kyve96@gmail.com |
bd4a9a56ca71e397b6a266f1919c1626b4d31214 | 5390d79dad71ad0d9ff9d0777435dcaf4aad16b3 | /chapter_07/pizza_topping3.py | d3a2378353388256cf56dcd63ea0eaf942f43e2c | [] | no_license | JasperMi/python_learning | 19770d79cce900d968cec76dac11e45a3df9c34c | 8111d0d12e4608484864dddb597522c6c60b54e8 | refs/heads/master | 2020-11-26T08:57:02.983869 | 2020-03-11T10:14:55 | 2020-03-11T10:14:55 | 218,935,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | prompt = "\nPlease input your pizza toppings:"
# Repeatedly prompt for pizza toppings, echoing each one, until 'quit'.
prompt += "\nEnter 'quit' to end the program. "
# (removed dead `message = ""` — it was assigned once and never used)
while True:
    pizza_topping = input(prompt)
    if pizza_topping == 'quit':
        break
    else:
        print("We'll add " + pizza_topping + ".")
| [
"darmi19@163.com"
] | darmi19@163.com |
dd965d230c675da30a2c48667e417f5269b2c963 | 8f36a508dd5be7198022c50b498874645989f695 | /migrations/versions/fb8c13e85d92_users_table.py | 1bee040bd2666a6f65b938105ef8a750f09d76e8 | [] | no_license | kpfu-ses/uvo-reserves-demo | 2e93fc4716799d1a1ea3553f40ec9da8f72f5ab4 | daaf86ba6c793acc6cc390c6dcc7ed32b1383486 | refs/heads/master | 2023-05-08T22:16:28.152278 | 2021-05-10T01:37:55 | 2021-05-10T01:37:55 | 332,978,501 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | """users table
Revision ID: fb8c13e85d92
Revises:
Create Date: 2021-01-26 09:22:58.149529
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Revision identifiers used by Alembic to order migrations.
revision = 'fb8c13e85d92'
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``user`` table with unique indexes on email and username."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('password', sa.String(length=128), nullable=True),
    sa.Column('confirmcode', sa.String(length=128), nullable=True),
    sa.Column('state', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Reverse `upgrade`: drop the indexes, then the ``user`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_index(op.f('ix_user_email'), table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
| [
"sumbel.enikeeva@gmail.com"
] | sumbel.enikeeva@gmail.com |
691cf31fb1d7e831b764492ca883e2c4ccfdeb40 | 9a8ff03d27e4822fa27f78fb6ba1dd419725cf90 | /home/urls.py | 1411cf4d18bcdf2783251f6cd45251f3234642e5 | [] | no_license | Pavlo-Olshansky/Social-lemon | 0f9f994fbbba78cd0e7defa1e7fcf60b6ed55165 | 3b7f0b9e8526f6c74d98ad38412151ea2678f808 | refs/heads/master | 2022-12-11T01:12:30.919023 | 2018-06-17T14:57:10 | 2018-06-17T14:57:10 | 104,945,732 | 0 | 0 | null | 2022-11-22T01:53:50 | 2017-09-26T22:48:52 | Python | UTF-8 | Python | false | false | 2,089 | py | from django.conf.urls import url, include
from . import views
from django.contrib.auth import views as auth_views
from .forms import CustomAuthForm
from django.contrib.auth.models import User
# Routes for auth (signup/login/logout), password reset, account activation
# and user profiles.
urlpatterns = [
    # url(r'^$', views.HomePage.as_view(), name='home'),

    # Register new user
    url(r'^signup/', views.SignUp.as_view(), name='signup'),

    # Login URL
    url(r'^login/$', auth_views.login, {'template_name': 'registration/login.html', 'authentication_form': CustomAuthForm}, name='login'),
    # Root URL doubles as a login page with a recommendation list in context
    url(r'^$', auth_views.login,
        {'template_name': 'home.html',
         'authentication_form': CustomAuthForm,
         'extra_context':
             {'recommendations': views.recommendation_list }
        }, name='home-login'),

    # Logout URL (redirects back to the root afterwards)
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),

    # Reset password flow (request -> done -> confirm-by-token -> complete)
    url(r'^password_reset/$', auth_views.password_reset, {'post_reset_redirect': '/password_reset/done/'}, name='password_reset'),
    url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',auth_views.password_reset_confirm, {'post_reset_redirect': '/reset/done/'}, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),

    # Send an activation URL
    url(r'^account_activation_sent/$', views.account_activation_sent, name='account_activation_sent'),

    # Activation URL (uid + token embedded in the link)
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.activate, name='activate'),

    # Profile URL's
    url(r'^profile/$', views.ViewProfile.as_view(), name='view_profile'),
    url(r'^profile/(?P<pk>\d+)/$', views.ViewProfile.as_view(), name='view_profile_with_pk'),
    url(r'^profile/edit/$', views.edit_profile, name='edit_profile'),
    url(r'^profile/password/$', views.ChangePassword.as_view(), name='change_password'),
]
| [
"pavlo.olshansky@gmail.com"
] | pavlo.olshansky@gmail.com |
702cd226d836b66cf059b8ee429af1a734eb9a5f | 595a755dce15abd52669c077a922e2304be0e5c9 | /10/alcatraz.py | b61f9672a60f105d162d8248f97a4a3088179349 | [] | no_license | ktn-andrea/Scripts | 4bcc578edabf8a5b309ff6cab9e76ca34a31dca3 | 2fefcc4dee1c8c295cb3f6f3e9096ff6e8c060ab | refs/heads/master | 2023-08-15T02:28:25.319964 | 2021-05-23T22:37:00 | 2021-05-23T22:37:00 | 370,164,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python3
def open_door(li, begin):
    """Toggle (via rotate_key) every door whose 1-based position is a
    multiple of `begin`; mutates `li` in place and returns it."""
    for pos in range(1, len(li) + 1):
        if pos % begin:
            continue  # not a multiple of `begin` — leave this door alone
        li[pos - 1] = rotate_key(li[pos - 1])
    return li
def rotate_key(n):
    """Flip a door state: 0 -> 1, 1 -> 0; any other value yields None."""
    return {0: 1, 1: 0}.get(n)
def main():
    """Simulate 600 doors toggled by passes 1..600 and print the open ones."""
    doors = [0 for i in range(1, 600+1)]
    # pass d toggles every d-th door; a door ends up open iff its position
    # has an odd number of divisors, i.e. is a perfect square
    for d in range(1, len(doors)+1):
        doors = open_door(doors, d)
    res = []
    for i in range(1, len(doors)+1):
        if doors[i-1] == 1:
            res.append(i)
    print("Nyitott ajtok sorszamai: ")
    print(res)
    print(''.join([str(i) for i in res]))


#######################################################
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | ktn-andrea.noreply@github.com |
b338a87119cd7fc03f0a87fd799d9a67b26c4624 | 9bce3862ef1c6235212a49cf49d300e0532be92c | /politico/api/v1/party/model.py | 71c0e83868241558d163aef959a431d9433a56a7 | [] | no_license | erycoking/Politico_API | d7f5458267dee0736ba154e602869d75514abde2 | 004727b03bfb77bd8d717bcf59b3fa3af5d699a0 | refs/heads/develop | 2022-12-09T08:24:37.589001 | 2019-03-06T01:40:29 | 2019-03-06T01:40:29 | 169,312,409 | 1 | 1 | null | 2022-12-08T01:37:36 | 2019-02-05T21:03:40 | Python | UTF-8 | Python | false | false | 2,779 | py | """
contains the class for party model
"""
class Party:
    """Plain in-memory representation of a political party.

    Exposes id/name/hq_address/logo_url through properties plus two dict
    snapshots used when serializing API responses.
    """

    def __init__(self, id, name, hq_address, logo_url):
        self._id = id
        self._name = name
        self._hq_address = hq_address
        self._logo_url = logo_url

    @property
    def id(self):
        # id getter
        return self._id

    @id.setter
    def id(self, id):
        # BUG FIX: the original setter returned self._id without assigning,
        # so `party.id = value` silently had no effect.
        self._id = id

    @property
    def name(self):
        # name getter
        return self._name

    @name.setter
    def name(self, name):
        # name setter
        self._name = name

    @property
    def hq_address(self):
        # hq_address getter
        return self._hq_address

    @hq_address.setter
    def hq_address(self, address):
        # hq_address setter
        self._hq_address = address

    @property
    def logo_url(self):
        # logo_url getter
        return self._logo_url

    @logo_url.setter
    def logo_url(self, logo):
        # logo_url setter
        self._logo_url = logo

    @property
    def party_data(self):
        """Full dict snapshot of this party (id, name, hq_address, logo_url)."""
        party_data = {}
        party_data['id'] = self._id
        party_data['name'] = self._name
        party_data['hq_address'] = self._hq_address
        party_data['logo_url'] = self._logo_url
        return party_data

    @property
    def party_data_for_updates_and_deletes(self):
        """Reduced dict snapshot (id + name) returned by update/delete APIs."""
        party_data_for_updates_and_deletes = {}
        party_data_for_updates_and_deletes['id'] = self._id
        party_data_for_updates_and_deletes['name'] = self._name
        return party_data_for_updates_and_deletes
class PartyTable:
    """In-memory store of parties, mapping party id -> party data dict."""

    parties = {}
    # Kept for backward compatibility with existing readers; add_party now
    # derives a fresh id from the current keys instead of this stale snapshot.
    next_id = len(parties) + 1

    def get_single_party_by_name(self, name):
        """Return the first party dict whose name matches, else None."""
        for party in self.parties.values():
            if party['name'] == name:
                return party

    def add_party(self, party_data):
        """Create a new party from `party_data` and return its stored dict.

        BUG FIX: `next_id` was evaluated once at class-creation time and never
        advanced, so every insert reused the same id and overwrote the
        previous party.  The id is now derived from the existing keys.
        """
        new_id = max(self.parties, default=0) + 1
        new_party = Party(
            new_id,
            party_data['name'],
            party_data['hq_address'],
            party_data['logo_url']
        )
        self.parties[new_id] = new_party.party_data
        PartyTable.next_id = new_id + 1  # keep the legacy attribute coherent
        return self.parties[new_id]

    def update_party(self, id, party_data):
        """Update name/hq_address/logo_url of party `id`; None if unknown."""
        party = self.parties.get(id)
        if party is None:
            # robustness: an unknown id no longer raises TypeError
            return None
        party['name'] = party_data['name']
        party['hq_address'] = party_data['hq_address']
        party['logo_url'] = party_data['logo_url']
        self.parties[id] = party
        return self.parties[id]

    def delete_party(self, id):
        """Delete party `id`; True if it existed, False otherwise."""
        party = self.parties.get(id)
        if party:
            del self.parties[id]
            return True
        return False
| [
"erycoking360@gmail.com"
] | erycoking360@gmail.com |
516ad36a3129084713ccfa27cbe83dfa3be28716 | 3780222b17c869607886bc642ece6fb9def97020 | /samples/NGPF/SDN/PCEP/pcep_pcreq_reply_sr-te_lsp.py | b335888a548c02a3a560b5c0446f73487c8c281e | [
"MIT"
] | permissive | muxiang0906/ixnetwork-api-py | ef28212cf05b6dfa70bf4ab52b8550fdc65535fa | 076f2aa2c09daea8b09a41240b294433950cff18 | refs/heads/master | 2020-06-16T22:59:41.313318 | 2018-10-15T23:14:23 | 2018-10-15T23:14:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,769 | py | # -*- coding: cp1252 -*-
################################################################################
# #
# Copyright 1997 - 2018 by IXIA Keysight #
# All Rights Reserved. #
# #
################################################################################
################################################################################
# #
# LEGAL NOTICE: #
# ============== #
# The following code and documentation (hereinafter "the script") is an #
# example script for demonstration purposes only. #
# The script is not a standard commercial product offered by Ixia and have #
# been developed and is being provided for use only as indicated herein. The #
# script [and all modifications enhancements and updates thereto (whether #
# made by Ixia and/or by the user and/or by a third party)] shall at all times #
# remain the property of Ixia. #
# #
# Ixia does not warrant (i) that the functions contained in the script will #
# meet the users requirements or (ii) that the script will be without #
# omissions or error-free. #
# THE SCRIPT IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND AND IXIA #
# DISCLAIMS ALL WARRANTIES EXPRESS IMPLIED STATUTORY OR OTHERWISE #
# INCLUDING BUT NOT LIMITED TO ANY WARRANTY OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE OR OF NON-INFRINGEMENT. #
# THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SCRIPT IS WITH THE #
# USER. #
# IN NO EVENT SHALL IXIA BE LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING #
# OUT OF THE USE OF OR THE INABILITY TO USE THE SCRIPT OR ANY PART THEREOF #
# INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS LOST BUSINESS LOST OR #
# DAMAGED DATA OR SOFTWARE OR ANY INDIRECT INCIDENTAL PUNITIVE OR #
# CONSEQUENTIAL DAMAGES EVEN IF IXIA HAS BEEN ADVISED OF THE POSSIBILITY OF #
# SUCH DAMAGES IN ADVANCE. #
# Ixia will not be required to provide any software maintenance or support #
# services of any kind (e.g. any error corrections) in connection with the #
# script or any part thereof. The user acknowledges that although Ixia may #
# from time to time and in its sole discretion provide maintenance or support #
# services for the script any such services are subject to the warranty and #
# damages limitations set forth herein and will not obligate Ixia to provide #
# any additional maintenance or support services. #
# #
################################################################################
################################################################################
# Description:
# 1. PCC Requested SR LSPs are statically configured in PCC, with initial
# delegation TRUE. When PCC starts, it sends PCRequest these LSPs with PCE.
# 2. Assign ports
# 3. Start all protocols
# 4. Retrieve protocol statistics. (PCE Sessions Per Port)
# 5. Retrieve protocol statistics. (PCC Per Port)
# 6. Send PCUpdate over DELEGATED Requested SR LSPs.
# 7. Retrieve protocol statistics. (PCE Sessions Per Port)
# 8. Retrieve protocol statistics. (PCC Per Port)
# 9. Stop all protocols
################################################################################
import time
import sys
#-------------------------------------------------------------------------------
# import IxNetwork
#-------------------------------------------------------------------------------
IX_NETWORK_LIBRARY_PATH = 'C:/Program Files (x86)/Ixia/IxNetwork/8.30.1076.4-EB/API/Python'
sys.path.append(IX_NETWORK_LIBRARY_PATH)
import IxNetwork
# START HARNESS VARS **********************************************************
# Default test-bed settings; indentation reconstructed — presumably the
# defaults apply only when no external harness has pre-supplied `py`
# (TODO confirm against the original sample).
if 'py' not in dir():
    class Py: pass
    py = Py()
    # (chassis_ip, card, port) tuples for the two test ports
    py.ports = (('10.216.108.96','2','1'),('10.216.108.96','2','2'))
    py.ixTclServer = "10.216.108.113"
    py.ixTclPort = 8081
# END HARNESS VARS ************************************************************
################################################################################
# Connect to IxNet client
################################################################################
ixNet = IxNetwork.IxNet()
ixNet.connect(py.ixTclServer, '-port', py.ixTclPort, '-version', '8.30')

################################################################################
# Cleaning up IxNetwork
################################################################################
# newConfig discards any configuration left over on the IxNetwork server
print ("Cleaning up IxNetwork...")
ixNet.execute('newConfig')

print("Get IxNetwork root object")
root = ixNet.getRoot()

################################################################################
# Adding virtual ports
################################################################################
print("Adding virtual port 1")
vport1 = ixNet.add(root, 'vport')
ixNet.commit()
# remapIds resolves the temporary handle returned by add() into the real one
vport1 = ixNet.remapIds(vport1)[0]
ixNet.setAttribute(vport1, '-name', '10GE LAN - 001')
ixNet.commit()

print("Adding virtual port 2")
vport2 = ixNet.add(root, 'vport')
ixNet.commit()
vport2 = ixNet.remapIds(vport2)[0]
ixNet.setAttribute(vport2, '-name', '10GE LAN - 002')
ixNet.commit()
################################################################################
# Adding topology
################################################################################
# Topology 1 (on vport1) will carry the single PCE device.
print("Adding topology 1")
topology1 = ixNet.add(root, 'topology')
ixNet.commit()
topology1 = ixNet.remapIds(topology1)[0]
ixNet.setAttribute(topology1, '-name', 'Topology 1')
ixNet.setAttribute(topology1, '-vports', vport1)
ixNet.commit()

################################################################################
# Adding device group
################################################################################
print("Adding device group 1")
device1 = ixNet.add(topology1, 'deviceGroup')
ixNet.commit()
device1 = ixNet.remapIds(device1)[0]
ixNet.setAttribute(device1, '-name', 'Device Group 1')
# one emulated PCE device
ixNet.setAttribute(device1, '-multiplier', '1')
ixNet.commit()

################################################################################
# Adding ethernet layer
################################################################################
print("Adding ethernet 1")
ethernet1 = ixNet.add(device1, 'ethernet')
ixNet.commit()
ethernet1 = ixNet.remapIds(ethernet1)[0]
# MAC addresses assigned via an incrementing counter multivalue
macMv = ixNet.getAttribute(ethernet1, '-mac')
ixNet.add(macMv, 'counter')
ixNet.setMultiAttribute(macMv + '/counter',
        '-direction', 'increment',
        '-start'    , '00:11:01:00:00:01',
        '-step'     , '00:00:00:00:00:01')
ixNet.commit()

################################################################################
# Adding IPv4 layer
################################################################################
print("Adding ipv4 1")
ipv4Addr1 = ixNet.add(ethernet1, 'ipv4')
ixNet.commit()
ipv4Addr1 = ixNet.remapIds(ipv4Addr1)[0]
# PCE side uses 1.1.1.1, gateway points at the PCC side (1.1.1.2)
addressMv = ixNet.getAttribute(ipv4Addr1, '-address')
ixNet.add(addressMv, 'singleValue')
ixNet.setMultiAttribute(addressMv + '/singleValue',
        '-value', '1.1.1.1')
ixNet.commit()

gatewayIpMv = ixNet.getAttribute(ipv4Addr1, '-gatewayIp')
ixNet.add(gatewayIpMv, 'singleValue')
ixNet.setMultiAttribute(gatewayIpMv + '/singleValue',
        '-value', '1.1.1.2')
ixNet.commit()
################################################################################
# Adding PCE layer
################################################################################
print("Adding PCE 1")
pce1 = ixNet.add(ipv4Addr1, 'pce')
ixNet.commit()
pce1 = ixNet.remapIds(pce1)[0]

################################################################################
# Adding PCC Group
################################################################################
# One pccGroup under the PCE describes the set of PCC peers it talks to.
print("Adding PCC Group1")
pccGroup1 = ixNet.add(pce1, 'pccGroup')
ixNet.commit()
pccGroup1 = ixNet.remapIds(pccGroup1)[0]
# the 10 PCC peer addresses: 1.1.1.2, 1.1.1.3, ... (matches topology 2 below)
pccIpv4AddressMv = ixNet.getAttribute(pccGroup1, '-pccIpv4Address')
ixNet.add(pccIpv4AddressMv, 'counter')
ixNet.setMultiAttribute(pccIpv4AddressMv + '/counter',
        '-direction', 'increment',
        '-start'    , '1.1.1.2',
        '-step'     , '0.0.0.1')
ixNet.commit()

ixNet.setAttribute(pccGroup1, '-multiplier', '10')
ixNet.commit()
# no PCE-initiated LSPs in this scenario; one PCReply LSP per PCC
ixNet.setAttribute(pccGroup1, '-pceInitiatedLspsPerPcc', '0')
ixNet.commit()
ixNet.setAttribute(pccGroup1, '-pcReplyLspsPerPcc', '1')
ixNet.commit()

################################################################################
# Adding PCRequest Match Criteria
# Configured parameters :
#       -srcIpv4Address
#       -destIpv4Address
################################################################################
# Incoming PCRequests are matched on their source/destination endpoints;
# these must line up with the Requested LSP endpoints configured on the PCC.
pceReqMatchCriteria1 = pccGroup1+'/pcRequestMatchCriteria:1'
srcEndPointIpv4Mv = ixNet.getAttribute(pceReqMatchCriteria1, '-srcIpv4Address')
ixNet.add(srcEndPointIpv4Mv, 'counter')
ixNet.setMultiAttribute(srcEndPointIpv4Mv + '/counter',
        '-direction', 'increment',
        '-start'    , '100.0.0.1',
        '-step'     , '0.0.0.1')
ixNet.commit()

destEndPointIpv4Mv = ixNet.getAttribute(pceReqMatchCriteria1, '-destIpv4Address')
ixNet.add(destEndPointIpv4Mv, 'counter')
ixNet.setMultiAttribute(destEndPointIpv4Mv + '/counter',
        '-direction', 'increment',
        '-start'    , '101.0.0.1',
        '-step'     , '0.0.0.1')
ixNet.commit()
################################################################################
# Adding topology
################################################################################
# Topology 2 (on vport2) carries the 10 emulated PCC devices.
print("Adding topology 2")
topology2 = ixNet.add(root, 'topology')
ixNet.commit()
topology2 = ixNet.remapIds(topology2)[0]
ixNet.setAttribute(topology2, '-name', 'Topology 2')
ixNet.setAttribute(topology2, '-vports', vport2)
ixNet.commit()

################################################################################
# Adding device group
################################################################################
print("Adding device group 2")
device2 = ixNet.add(topology2, 'deviceGroup')
ixNet.commit()
device2 = ixNet.remapIds(device2)[0]
ixNet.setAttribute(device2, '-name', 'Device Group 2')
# 10 PCCs — matches the pccGroup multiplier on the PCE side
ixNet.setAttribute(device2, '-multiplier', '10')
ixNet.commit()

################################################################################
# Adding ethernet layer
################################################################################
print("Adding ethernet 2")
ethernet2 = ixNet.add(device2, 'ethernet')
ixNet.commit()
ethernet2 = ixNet.remapIds(ethernet2)[0]
macMv = ixNet.getAttribute(ethernet2, '-mac')
ixNet.add(macMv, 'counter')
ixNet.setMultiAttribute(macMv + '/counter',
        '-direction', 'increment',
        '-start'    , '00:12:01:00:00:01',
        '-step'     , '00:00:00:00:00:01')
ixNet.commit()

################################################################################
# Adding IPv4 layer
################################################################################
print("Adding ipv4 2")
ipv4Addr2 = ixNet.add(ethernet2, 'ipv4')
ixNet.commit()
ipv4Addr2 = ixNet.remapIds(ipv4Addr2)[0]
# PCC addresses increment from 1.1.1.2 — the same range the PCE expects
addressMv = ixNet.getAttribute(ipv4Addr2, '-address')
ixNet.add(addressMv, 'counter')
ixNet.setMultiAttribute(addressMv + '/counter',
        '-direction', 'increment',
        '-start'    , '1.1.1.2',
        '-step'     , '0.0.0.1')
ixNet.commit()

gatewayIpMv = ixNet.getAttribute(ipv4Addr2, '-gatewayIp')
ixNet.add(gatewayIpMv, 'singleValue')
ixNet.setMultiAttribute(gatewayIpMv + '/singleValue',
        '-value', '1.1.1.1')
ixNet.commit()
################################################################################
# Adding PCC layer
################################################################################
print("Adding PCC 2")
pcc2 = ixNet.add(ipv4Addr2, 'pcc')
ixNet.commit()
pcc2 = ixNet.remapIds(pcc2)[0]
pceIpv4AddressMv = ixNet.getAttribute(pcc2, '-pceIpv4Address')
ixNet.add(pceIpv4AddressMv, 'singleValue')
ixNet.setMultiAttribute(pceIpv4AddressMv + '/singleValue',
'-value', '1.1.1.1')
ixNet.commit()
ixNet.setAttribute(pcc2, '-expectedInitiatedLspsForTraffic', '0')
ixNet.commit()
ixNet.setAttribute(pcc2, '-preEstablishedSrLspsPerPcc', '0')
ixNet.commit()
ixNet.setAttribute(pcc2, '-requestedLspsPerPcc', '1')
ixNet.commit()
################################################################################
# Adding Requested LSPs
# Configured parameters :
# -sourceIpv6Address
# -sourceIpv4Address
# -includeMetric
# -maxNoOfIroSubObjects
# -initialDelegation
################################################################################
reqLsp2 = pcc2 + '/requestedLsps:1'
sourceIpv6AddressMv = ixNet.getAttribute(reqLsp2, '-sourceIpv6Address')
ixNet.add(sourceIpv6AddressMv, 'counter')
ixNet.setMultiAttribute(sourceIpv6AddressMv + '/counter',
'-direction', 'increment',
'-start' , '1000:0:0:0:0:0:0:1',
'-step' , '0:0:0:0:0:0:0:1')
ixNet.commit()
sourceIpv4AddressMv = ixNet.getAttribute(reqLsp2, '-sourceIpv4Address')
ixNet.add(sourceIpv4AddressMv, 'counter')
ixNet.setMultiAttribute(sourceIpv4AddressMv + '/counter',
'-direction', 'increment',
'-start' , '100.0.0.1',
'-step' , '0.0.0.1')
ixNet.commit()
initialDelegationMv = ixNet.getAttribute(reqLsp2, '-initialDelegation')
ixNet.add(initialDelegationMv, 'singleValue')
ixNet.setMultiAttribute(initialDelegationMv + '/singleValue',
'-value', 'true')
ixNet.commit()
includeMetricMv = ixNet.getAttribute(reqLsp2, '-includeMetric')
ixNet.add(includeMetricMv, 'singleValue')
ixNet.setMultiAttribute(includeMetricMv + '/singleValue',
'-value', 'true')
ixNet.commit()
ixNet.setAttribute(reqLsp2, '-maxNoOfIroSubObjects', '1')
ixNet.commit()
################################################################################
# Adding Requested IRO object
# Configured parameters :
# -subObjectType
# -ipv4Address
# -prefixLength
# -active
################################################################################
reqLspMetric2 = reqLsp2 + '/pcepIroSubObjectsList:1'
subObjectTypeMv = ixNet.getAttribute(reqLspMetric2, '-subObjectType')
ixNet.add(subObjectTypeMv, 'singleValue')
ixNet.setMultiAttribute(subObjectTypeMv + '/singleValue',
'-value', 'ipv4prefix')
ixNet.commit()
ipv4AddressMv = ixNet.getAttribute(reqLspMetric2, '-ipv4Address')
ixNet.add(ipv4AddressMv, 'counter')
ixNet.setMultiAttribute(ipv4AddressMv + '/counter',
'-direction', 'increment',
'-start' , '100.0.0.1',
'-step' , '0.0.0.1')
ixNet.commit()
prefixLengthMv = ixNet.getAttribute(reqLspMetric2, '-prefixLength')
ixNet.add(prefixLengthMv, 'singleValue')
ixNet.setMultiAttribute(prefixLengthMv + '/singleValue',
'-value', '24')
ixNet.commit()
activeMv = ixNet.getAttribute(reqLspMetric2, '-active')
ixNet.add(activeMv, 'singleValue')
ixNet.setMultiAttribute(activeMv + '/singleValue',
'-value', 'true')
ixNet.commit()
################################################################################
# 2. Assign ports
################################################################################
print("Assigning ports")
chassisIp = py.ports[0][0]
card1 = py.ports[0][1]
port1 = py.ports[0][2]
card2 = py.ports[1][1]
port2 = py.ports[1][2]
chassis = ixNet.add(root + '/availableHardware', 'chassis')
ixNet.setMultiAttribute(chassis, '-hostname', chassisIp)
ixNet.commit()
ixNet.setAttribute(vport1, '-connectedTo',
'%s/card:%s/port:%s' % (chassis, card1, port1))
ixNet.commit()
ixNet.setAttribute(vport2, '-connectedTo',
'%s/card:%s/port:%s' % (chassis, card2, port2))
ixNet.commit()
time.sleep(5)
print("Starting all protocols")
################################################################################
# 3. Start all protocols
################################################################################
ixNet.execute('startAllProtocols')
print("Wait for 1 minute")
time.sleep(60)
################################################################################
# 4. Retrieve protocol statistics (PCE Sessions Per Port) #
################################################################################
print ("Fetching all PCE Sessions Per Port Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"PCE Sessions Per Port"/page'
statcap = ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in ixNet.getAttribute(viewPage, '-rowValues') :
for statVal in statValList :
print("***************************************************")
index = 0
for satIndv in statVal :
print("%-40s:%s" % (statcap[index], satIndv))
index = index + 1
# end for
# end for
# end for
print("***************************************************")
################################################################################
# 5. Retrieve protocol statistics (PCC Per Port) #
################################################################################
print ("Fetching all PCC Per Port Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"PCC Per Port"/page'
statcap = ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in ixNet.getAttribute(viewPage, '-rowValues') :
for statVal in statValList :
print("***************************************************")
index = 0
for satIndv in statVal :
print("%-40s:%s" % (statcap[index], satIndv))
index = index + 1
# end for
# end for
# end for
print("***************************************************")
################################################################################
# 6. Trigger PCUpdate from PCE side PCReply LSPs #
################################################################################
pcReplyLspParameters1 = pccGroup1+'/pcReplyLspParameters:1'
includeBandwidth = ixNet.getAttribute(pcReplyLspParameters1, '-includeBandwidth')
ixNet.add(includeBandwidth, 'singleValue')
ixNet.setMultiAttribute(includeBandwidth + '/singleValue',
'-value', 'true')
ixNet.commit()
globals = root+'/globals'
topology = globals+'/topology'
print("Applying OTF for sending PCUpd message....")
ixNet.execute('applyOnTheFly', topology)
print("***************************************************")
print("Wait for 30 Seconds")
time.sleep(30)
print("Checking statistics after PCUpdate")
################################################################################
# 7. Retrieve protocol statistics (PCE Sessions Per Port) #
################################################################################
print ("Fetching all PCE Sessions Per Port Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"PCE Sessions Per Port"/page'
statcap = ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in ixNet.getAttribute(viewPage, '-rowValues') :
for statVal in statValList :
print("***************************************************")
index = 0
for satIndv in statVal :
print("%-40s:%s" % (statcap[index], satIndv))
index = index + 1
# end for
# end for
# end for
print("***************************************************")
################################################################################
# 8. Retrieve protocol statistics (PCC Per Port) #
################################################################################
print ("Fetching all PCC Per Port Stats\n")
viewPage = '::ixNet::OBJ-/statistics/view:"PCC Per Port"/page'
statcap = ixNet.getAttribute(viewPage, '-columnCaptions')
for statValList in ixNet.getAttribute(viewPage, '-rowValues') :
for statVal in statValList :
print("***************************************************")
index = 0
for satIndv in statVal :
print("%-40s:%s" % (statcap[index], satIndv))
index = index + 1
# end for
# end for
# end for
print("***************************************************")
################################################################################
# 9. Stop all protocols #
################################################################################
print ("Stop all protocols")
ixNet.execute('stopAllProtocols')
| [
"diana.galan@keysight.com"
] | diana.galan@keysight.com |
45b61294eff35191dccce7034918c0fe264fb4b6 | 6273c51bd8ccb582ce86085837b11c77b5cd78d8 | /probability_calculator.py | c89002ff15c4e16a2b641980005905d03fec292d | [] | no_license | diascar/Scientific_computing_python_certification | 0bd7a581c1c98fbcd854ca70d739a3b80125e1ca | e41263fc7a426a1c0d8cfa7ec01baac0d15435e0 | refs/heads/main | 2023-01-27T14:12:15.490368 | 2020-12-10T16:08:44 | 2020-12-10T16:08:44 | 320,321,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import random
import copy
class Hat:
    """A hat holding colored balls that can be drawn at random."""

    def __init__(self, **ball_colors):
        # Expand keyword counts into a flat list, e.g. red=2 -> ["red", "red"],
        # preserving keyword order.
        self.contents = [
            color
            for color, count in ball_colors.items()
            for _ in range(count)
        ]

    def draw(self, numBalls):
        """Remove and return `numBalls` random balls (all of them if asked for more)."""
        if numBalls >= len(self.contents):
            return self.contents
        # Pop from the highest index down so earlier pops don't shift later ones.
        chosen = random.sample(range(len(self.contents)), numBalls)
        return [self.contents.pop(i) for i in sorted(chosen, reverse=True)]
def experiment(hat: Hat, expected_balls: dict, num_balls_drawn: int, num_experiments: int):
    """Estimate the probability that one draw contains at least `expected_balls`.

    Runs `num_experiments` independent trials. Each trial draws
    `num_balls_drawn` balls from a deep copy of `hat` (so the original hat
    is never consumed) and counts as a success when every expected color
    appears at least the requested number of times.
    """
    successes = 0
    for _ in range(num_experiments):
        drawn = copy.deepcopy(hat).draw(num_balls_drawn)
        if all(drawn.count(color) >= needed
               for color, needed in expected_balls.items()):
            successes += 1
    return successes / num_experiments
| [
"dias.car@outlook.com"
] | dias.car@outlook.com |
82ea0bdbe9f4985777264a35eae47a095cffe75c | 1e1c26ee4c7115f5ec0415974dbae31a3133c3dd | /pyhedrals/pyhedrals.py | 045a6d98acc2e56144d21db361393b076127ce5b | [
"MIT"
] | permissive | HubbeKing/pyhedrals | dadd244ffc28fc3cfc468a9240fddae0e9b036a2 | d4c6063b6c506b5e0f9b72d883e630cfe4d5aed5 | refs/heads/master | 2020-04-12T03:39:47.425481 | 2018-12-18T04:30:43 | 2018-12-18T04:30:43 | 162,273,413 | 0 | 0 | null | 2018-12-18T10:49:25 | 2018-12-18T10:49:24 | null | UTF-8 | Python | false | false | 15,391 | py | # -*- coding: utf-8 -*-
import random
import operator
from builtins import range
import heapq
from collections import Counter as mset
from sly import Lexer, Parser
class UnknownCharacterException(Exception):
    """Raised by the lexer when the input contains a character with no token rule."""
    pass
class SyntaxErrorException(Exception):
    """Raised by the parser when the token stream does not match the grammar."""
    pass
class InvalidOperandsException(Exception):
    """Raised for semantically invalid operands (too many dice, bad thresholds, ...)."""
    pass
class Die(object):
    """A single die: rolls itself on construction.

    The ``exploded`` / ``dropped`` flags only change how the die renders
    via ``__str__`` (``*4*`` / ``-4-``); the numeric ``value`` is untouched.
    """

    def __init__(self, numSides):
        self.numSides = numSides
        self.value = random.randint(1, numSides)
        self.exploded = False
        self.dropped = False

    def __str__(self):
        rendered = str(self.value)
        if self.exploded:
            rendered = '*{}*'.format(rendered)
        if self.dropped:
            rendered = '-{}-'.format(rendered)
        return rendered

    def __lt__(self, other):
        # Order dice purely by face value (used by sorted/heapq).
        return self.value < other.value
class RollList(object):
    """The result of rolling ``numDice`` dice of ``numSides`` sides each.

    When ``count`` is True the list is in counting mode: ``sum`` tallies
    how many non-dropped dice there are instead of adding face values.
    """

    def __init__(self, numDice, numSides):
        self.numDice = numDice
        self.numSides = numSides
        self.rolls = [Die(numSides) for _ in range(numDice)]
        self.count = False

    def sum(self):
        surviving = (die for die in self.rolls if not die.dropped)
        return sum(self.getDieValue(die) for die in surviving)

    def getDieValue(self, d):
        # Counting mode: each kept die contributes 1; otherwise its face value.
        return 1 if self.count else d.value

    def sort(self, reverse=False):
        self.rolls = sorted(self.rolls, reverse=reverse)

    def __str__(self):
        faces = ','.join(str(die) for die in self.rolls)
        return '{}d{}: {} ({})'.format(self.numDice, self.numSides, faces, self.sum())
# Calculate the column position of the given token.
# input is the input text string
# token is a token instance
def _findColumn(text, token):
if token is not None:
last_cr = text.rfind('\n', 0, token.index)
if last_cr < 0:
last_cr = 0
column = (token.index - last_cr) + 1
return column
else:
return 'unknown'
class DiceLexer(Lexer):
    """sly lexer for dice expressions, e.g. ``4d6kh3! + 2 # damage``.

    The ``@_`` decorator is injected by sly's metaclass; token rules are
    regular expressions, tried in declaration order.
    """
    tokens = {NUMBER,
              PLUS, MINUS,
              TIMES, DIVIDE, MODULUS,
              EXPONENT,
              KEEPHIGHEST, KEEPLOWEST,
              DROPHIGHEST, DROPLOWEST,
              EXPLODE,
              REROLL,
              COUNT,
              SORT,
              DICE,
              LPAREN, RPAREN,
              COMMENT}
    ignore = ' \t'
    # Tokens
    PLUS = r'\+'
    MINUS = r'-'
    TIMES = r'\*'
    DIVIDE = r'/'
    MODULUS = r'%'
    EXPONENT = r'\^'
    KEEPHIGHEST = r'kh'
    KEEPLOWEST = r'kl'
    DROPHIGHEST = r'dh'
    DROPLOWEST = r'dl'
    # Modifier tokens optionally carry a comparison suffix (<, >, <=, >=).
    EXPLODE = r'!([<>]=?)?'
    REROLL = r'ro?([<>]=?)?'
    COUNT = r'c([<>]=?)?'
    SORT = r's[ad]?'
    DICE = r'd'
    LPAREN = r'\('
    RPAREN = r'\)'
    @_(r'\d+')
    def NUMBER(self, t):
        # Guard against absurdly long literals: 100+ digit numbers are
        # coerced to 0 rather than parsed (avoids huge-int blowups).
        try:
            if len(t.value) < 100:
                t.value = int(t.value)
            else:
                raise ValueError
        except ValueError:
            t.value = 0
        return t
    @_(r'\#.*')
    def COMMENT(self, t):
        # Strip the leading '#' and surrounding whitespace; the parser
        # stores this as the roll's description.
        t.value = str(t.value)[1:].strip()
        return t
    @_(r'\n+')
    def ignore_newline(self, t):
        # Track line numbers so error columns stay meaningful.
        self.lineno += len(t.value)
    def error(self, t):
        """Raise on any character no token rule matches."""
        col = _findColumn(self.text, t)
        raise UnknownCharacterException("unknown character '{}' (col {})".format(t.value[0], col))
class DiceParser(Parser):
    """sly parser that evaluates dice expressions as it reduces them.

    Grammar actions return either plain ints or RollList instances;
    ``_sumDiceRolls`` collapses a RollList into its integer total and
    records it in ``self.rolls`` for later display.
    """
    def __init__(self, maxDice=10000, maxSides=10000, maxExponent=10000, maxMult=1000000):
        # Safety limits guarding against resource-exhausting expressions.
        self.MAX_DICE = maxDice
        self.MAX_SIDES = maxSides
        self.MAX_EXPONENT = maxExponent
        self.MAX_MULT = maxMult
        self.rolls = []
        self.description = None
    tokens = DiceLexer.tokens
    # Parsing rules
    precedence = (('left', PLUS, MINUS),
                  ('left', TIMES, DIVIDE, MODULUS),
                  ('left', EXPONENT),
                  ('left', KEEPHIGHEST, KEEPLOWEST,
                   DROPHIGHEST, DROPLOWEST,
                   EXPLODE, REROLL, COUNT,
                   SORT),
                  ('left', DICE),
                  ('right', UMINUS),
                  ('right', UDICE))
    @_('expr PLUS expr',
       'expr MINUS expr',
       'expr TIMES expr',
       'expr DIVIDE expr',
       'expr MODULUS expr',
       'expr EXPONENT expr')
    def expr(self, p):
        """Binary arithmetic; * and ^ are bounded by MAX_MULT / MAX_EXPONENT."""
        op = p[1]
        left = self._sumDiceRolls(p.expr0)
        right = self._sumDiceRolls(p.expr1)
        if op == '+':
            return operator.add(left, right)
        elif op == '-':
            return operator.sub(left, right)
        elif op == '*':
            if (-self.MAX_MULT <= left <= self.MAX_MULT and
                    -self.MAX_MULT <= right <= self.MAX_MULT):
                return operator.mul(left, right)
            else:
                raise InvalidOperandsException(
                    'multiplication operands are larger than the maximum {}'
                    .format(self.MAX_MULT))
        elif op == '/':
            return operator.floordiv(left, right)
        elif op == '%':
            return operator.mod(left, right)
        elif op == '^':
            if (-self.MAX_EXPONENT <= left <= self.MAX_EXPONENT and
                    -self.MAX_EXPONENT <= right <= self.MAX_EXPONENT):
                return operator.pow(left, right)
            else:
                raise InvalidOperandsException(
                    'operand or exponent is larger than the maximum {}'
                    .format(self.MAX_EXPONENT))
    @_('MINUS expr %prec UMINUS')
    def expr(self, p):
        # Unary minus.
        return operator.neg(self._sumDiceRolls(p.expr))
    @_('dice_expr')
    def expr(self, p):
        return p.dice_expr
    @_('expr DICE expr')
    def dice_expr(self, p):
        return self._rollDice(p.expr0, p.expr1)
    @_('DICE expr %prec UDICE')
    def dice_expr(self, p):
        # Bare 'dN' means one die.
        return self._rollDice(1, p.expr)
    @_('')
    def empty(self, p):
        pass
    @_('dice_expr KEEPHIGHEST expr',
       'dice_expr KEEPLOWEST expr',
       'dice_expr DROPHIGHEST expr',
       'dice_expr DROPLOWEST expr',
       'dice_expr KEEPHIGHEST empty',
       'dice_expr KEEPLOWEST empty',
       'dice_expr DROPHIGHEST empty',
       'dice_expr DROPLOWEST empty')
    def dice_expr(self, p):
        """kh/kl/dh/dl: mark all but the kept dice as dropped (count defaults to 1)."""
        rollList = p.dice_expr
        op = p[1]
        keepDrop = self._sumExpr(p) or 1
        # filter dice that have already been dropped
        validRolls = [r for r in rollList.rolls if not r.dropped]
        # if it's a drop op, invert the number into a keep count
        if op.startswith('d'):
            opType = 'drop'
            keepDrop = len(validRolls) - keepDrop
        else:
            opType = 'keep'
        if len(validRolls) < keepDrop:
            raise InvalidOperandsException(
                'attempted to {} {} dice when only {} were rolled'
                .format(opType, keepDrop, len(validRolls)))
        if op == 'kh' or op == 'dl':
            keptRolls = heapq.nlargest(keepDrop, validRolls)
        elif op == 'kl' or op == 'dh':
            keptRolls = heapq.nsmallest(keepDrop, validRolls)
        else:
            raise NotImplementedError(
                "operator '{}' is not implemented (also, this should be impossible?)"
                .format(op))
        # determine which rolls were dropped, and mark them as such
        dropped = list((mset(validRolls) - mset(keptRolls)).elements())
        for drop in dropped:
            index = rollList.rolls.index(drop)
            rollList.rolls[index].dropped = True
        return rollList
    @_('dice_expr EXPLODE expr',
       'dice_expr EXPLODE empty')
    def dice_expr(self, p):
        """'!': roll an extra die for each die meeting the threshold, recursively."""
        rollList = p.dice_expr
        op = p.EXPLODE
        threshold = self._sumExpr(p) or rollList.numSides
        comp = self._getComparisonOp('explode', op, p, threshold, rollList.numSides)
        debrisList = []
        def explode(die):
            die.exploded = True
            debris = Die(die.numSides)
            debrisList.append(debris)
            if comp(debris.value, threshold):
                explode(debris)
        for roll in rollList.rolls:
            if comp(roll.value, threshold):
                explode(roll)
        rollList.rolls.extend(debrisList)
        return rollList
    @_('dice_expr REROLL expr',
       'dice_expr REROLL empty')
    def dice_expr(self, p):
        """'r'/'ro': drop matching dice and roll replacements ('ro' rerolls once)."""
        rollList = p.dice_expr
        op = p.REROLL
        threshold = self._sumExpr(p) or 1
        comp = self._getComparisonOp('reroll', op, p, threshold, rollList.numSides)
        rerollList = []
        def reroll(die, recurse=True):
            die.dropped = True
            rerollDie = Die(die.numSides)
            rerollList.append(rerollDie)
            if recurse and comp(rerollDie.value, threshold):
                reroll(rerollDie)
        recurse = True
        if len(op) > 1 and op[1] == 'o':
            recurse = False
        for roll in rollList.rolls:
            if comp(roll.value, threshold):
                reroll(roll, recurse=recurse)
        rollList.rolls.extend(rerollList)
        return rollList
    @_('dice_expr COUNT expr',
       'dice_expr COUNT empty')
    def dice_expr(self, p):
        """'c': drop non-matching dice and switch the list to counting mode."""
        rollList = p.dice_expr
        op = p.COUNT
        threshold = self._sumExpr(p) or rollList.numSides
        comp = self._getComparisonOp('count', op, p, threshold, rollList.numSides)
        # filter dice that have already been dropped
        validRolls = [r for r in rollList.rolls if not r.dropped]
        for roll in validRolls:
            if not comp(roll.value, threshold):
                roll.dropped = True
        rollList.count = True
        return rollList
    def _sumExpr(self, p):
        # Returns the modifier's numeric argument, or None for 'empty' rules.
        if 'expr' in p._namemap:
            return self._sumDiceRolls(p.expr)
    def _getComparisonOp(self, opName, op, p, threshold, numSides):
        """Map a modifier's comparison suffix to an operator, validating the threshold."""
        comp = operator.eq
        if op.endswith('<'):
            if threshold > numSides:
                raise InvalidOperandsException(
                    "{} threshold '<{}' is invalid with {} sided dice"
                    .format(opName, threshold, numSides))
            comp = operator.lt
        elif op.endswith('>'):
            if threshold < 1:
                raise InvalidOperandsException(
                    "{} threshold '>{}' is invalid"
                    .format(opName, threshold))
            comp = operator.gt
        elif op.endswith('<='):
            if threshold >= numSides:
                raise InvalidOperandsException(
                    "{} threshold '<={}' is invalid with {} sided dice"
                    .format(opName, threshold, numSides))
            comp = operator.le
        elif op.endswith('>='):
            if threshold <= 1:
                raise InvalidOperandsException(
                    "{} threshold '>={}' is invalid"
                    .format(opName, threshold))
            comp = operator.ge
        if comp == operator.eq:
            if not 1 <= threshold <= numSides:
                raise InvalidOperandsException(
                    "{} threshold '{}' is invalid with {} sided dice"
                    .format(opName, threshold, numSides))
        else:
            # A comparison suffix requires an explicit numeric argument.
            if 'expr' not in p._namemap:
                raise InvalidOperandsException(
                    "no parameter given to {} comparison"
                    .format(opName))
        return comp
    @_('dice_expr SORT')
    def dice_expr(self, p):
        """'s'/'sa' sorts ascending, 'sd' descending."""
        rollList = p.dice_expr
        op = p.SORT
        reverse = False
        if op == 'sd':
            reverse = True
        rollList.sort(reverse)
        return rollList
    @_('LPAREN expr RPAREN')
    def expr(self, p):
        return p.expr
    @_('NUMBER')
    def expr(self, p):
        return p.NUMBER
    @_('expr COMMENT')
    def expr(self, p):
        # Trailing '# ...' comment becomes the roll's description.
        self.description = p.COMMENT
        return p.expr
    def error(self, p):
        """Translate sly parse errors into SyntaxErrorException with a column."""
        if p is None:
            raise SyntaxErrorException("syntax error at the end of the given expression")
        col = _findColumn(self._dice_expr, p)
        raise SyntaxErrorException(
            "syntax error at '{}' (col {})"
            .format(p.value, col))
    def _rollDice(self, numDice, numSides):
        """Validate roll parameters against the configured limits and roll."""
        numDice = self._sumDiceRolls(numDice)
        numSides = self._sumDiceRolls(numSides)
        if numDice > self.MAX_DICE:
            raise InvalidOperandsException(
                'attempted to roll more than {} dice in a single d expression'
                .format(self.MAX_DICE))
        if numSides > self.MAX_SIDES:
            raise InvalidOperandsException(
                'attempted to roll a die with more than {} sides'
                .format(self.MAX_SIDES))
        if numDice < 0:
            raise InvalidOperandsException(
                'attempted to roll a negative number of dice')
        if numSides < 0:
            raise InvalidOperandsException(
                'attempted to roll a die with a negative number of sides')
        if numSides < 1:
            raise InvalidOperandsException(
                'attempted to roll a die with zero sides')
        return RollList(numDice, numSides)
    def _sumDiceRolls(self, rollList):
        """convert from dice roll structure to a single integer result"""
        if isinstance(rollList, RollList):
            # Record the roll so callers can render it after parsing.
            self.rolls.append(rollList)
            return rollList.sum()
        else:
            return rollList
class DiceRoller(object):
    """Facade tying DiceLexer and DiceParser together for one-shot evaluation."""

    def __init__(self, maxDice=10000, maxSides=10000, maxExponent=10000, maxMult=1000000):
        self.lexer = DiceLexer()
        self.parser = DiceParser(maxDice, maxSides, maxExponent, maxMult)

    def reset(self):
        """Clear roll history and description left over from a previous parse."""
        self.parser.rolls = []
        self.parser.description = None

    def parse(self, dice_expr):
        """Evaluate *dice_expr* and return its integer result."""
        # Stash the raw text so parser error messages can report columns.
        self.parser._dice_expr = dice_expr
        self.reset()
        token_stream = self.lexer.tokenize(dice_expr)
        total = self.parser._sumDiceRolls(self.parser.parse(token_stream))
        self.description = self.parser.description
        return total

    def getRollStrings(self):
        """Yield a printable summary for every roll made during parse()."""
        return (str(roll) for roll in self.parser.rolls)
def main():
    """Command-line entry point: evaluate one dice expression and print it."""
    import argparse
    argparser = argparse.ArgumentParser(description='An interpreter for dice expressions.')
    argparser.add_argument('-v', '--verbose', help='print all roll results', action='store_true')
    argparser.add_argument('diceexpr', help='the dice expression you want to execute', type=str)
    cmdArgs = argparser.parse_args()
    roller = DiceRoller()
    # Translate every known evaluation failure into a user-facing message
    # rather than a traceback.
    try:
        result = roller.parse(cmdArgs.diceexpr)
    except OverflowError:
        print('Error: result too large to calculate')
        return
    except (ZeroDivisionError,
            UnknownCharacterException,
            SyntaxErrorException,
            InvalidOperandsException,
            RecursionError,
            NotImplementedError) as e:
        print('Error: {}'.format(e))
        return
    # Append the '#' comment (if any) after the numeric result.
    if roller.description:
        result = '{} {}'.format(result, roller.description)
    if cmdArgs.verbose:
        # Verbose mode prefixes the result with every individual roll.
        rollStrings = roller.getRollStrings()
        rollString = ' | '.join(rollStrings)
        print('{}{}'.format('[{}] '.format(rollString) if rollString else '', result))
        return
    print(result)
# Allow running as a script, e.g.: python pyhedrals.py "3d6+2".
if __name__ == '__main__':
    main()
| [
"679547+StarlitGhost@users.noreply.github.com"
] | 679547+StarlitGhost@users.noreply.github.com |
151fc3d2539d862a7734646168592b44017b3203 | 5b76874173ce8890906226b6318f714e649588d6 | /src/test2/settings.py | 97bbe6fcd7a5e7b2261942b26fe9e3122c62c507 | [] | no_license | matthewyau122/test2 | 9f17a43da59b68da766ba782b7d8be98c600f557 | 62cba1cde52281a9afde23c0a8a735d7cb6fea80 | refs/heads/master | 2020-04-12T11:12:30.357479 | 2018-12-19T15:01:49 | 2018-12-19T15:01:49 | 162,452,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,085 | py | """
Django settings for test2 project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to version control;
# load it from an environment variable before any deployment.
SECRET_KEY = '1ou95pyfwz8g3s@25=j+zami_bph8equct8ilw4xb_%nzg8tit'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test2.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'test2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Default development database: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"matthewyau122@gmail.com"
] | matthewyau122@gmail.com |
824a8a299cdea984c99f9a2b32fe5eb4b4918082 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/traits/tests/test_container_events.py | 7ccbe4fccef1ce095b0e9bfabb18e60297996524 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 5,193 | py | # ------------------------------------------------------------------------------
#
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# ------------------------------------------------------------------------------
"""
Tests for Dict items_changed events
"""
from __future__ import absolute_import, print_function
from traits.testing.unittest_tools import unittest
from traits.api import HasTraits, Dict
class MyClass(HasTraits):
    """Fixture: a HasTraits class with a Dict trait and a static change handler."""

    d = Dict({"a": "apple", "b": "banana", "c": "cherry", "d": "durian"})

    def __init__(self, callback):
        "The callback is called with the TraitDictEvent instance"
        self.callback = callback

    def _d_items_changed(self, event):
        # Statically-named handler: traits wires this to changes of `d`.
        if self.callback:
            self.callback(event)
class MyOtherClass(HasTraits):
    """A dummy HasTraits class with a Dict trait but no static change
    handler; used to exercise dynamically attached `d_items` listeners.
    """
    # Default mapping; mutating it fires `d_items` notifications.
    d = Dict({"a": "apple", "b": "banana", "c": "cherry", "d": "durian"})
class Callback:
    """
    A stateful callback initialized with the dict-event payloads it expects.

    Instances serve as `d_items` handlers; calling one asserts (via the
    owning test case's ``assertEqual``) that the event carries exactly the
    expected ``added``/``changed``/``removed`` mappings, and records in
    ``called`` that it ran.
    """

    def __init__(self, obj, added=None, changed=None, removed=None):
        """*obj* is the TestCase supplying assertEqual.

        The payload arguments default to fresh empty dicts per instance
        (None sentinel avoids the shared-mutable-default pitfall).
        """
        self.obj = obj
        self.added = {} if added is None else added
        self.changed = {} if changed is None else changed
        self.removed = {} if removed is None else removed
        self.called = False

    def __call__(self, event):
        # Assert all three payloads. Previously a mismatch in `added` was
        # only printed (leftover debugging) instead of failing the test.
        self.obj.assertEqual(event.added, self.added)
        self.obj.assertEqual(event.changed, self.changed)
        self.obj.assertEqual(event.removed, self.removed)
        self.called = True
class DictEventTestCase(unittest.TestCase):
    """Exercises TraitDictEvent notifications for every mutating dict operation."""
    def test_setitem(self):
        # overwriting an existing item
        cb = Callback(self, changed={"c": "cherry"})
        foo = MyClass(cb)
        foo.d["c"] = "coconut"
        self.assertTrue(cb.called)
        # adding a new item
        cb = Callback(self, added={"g": "guava"})
        bar = MyClass(cb)
        bar.d["g"] = "guava"
        self.assertTrue(cb.called)
        return
    def test_delitem(self):
        cb = Callback(self, removed={"b": "banana"})
        foo = MyClass(cb)
        del foo.d["b"]
        self.assertTrue(cb.called)
        return
    def test_clear(self):
        # clear() should report the entire previous contents as removed.
        removed = MyClass(None).d.copy()
        cb = Callback(self, removed=removed)
        foo = MyClass(cb)
        foo.d.clear()
        self.assertTrue(cb.called)
        return
    def test_update(self):
        # One update() may both change an existing key and add a new one.
        update_dict = {"a": "artichoke", "f": "fig"}
        cb = Callback(self, changed={"a": "apple"}, added={"f": "fig"})
        foo = MyClass(cb)
        foo.d.update(update_dict)
        self.assertTrue(cb.called)
        return
    def test_setdefault(self):
        # Test retrieving an existing value
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.setdefault("a", "dummy"), "apple")
        self.assertFalse(cb.called)
        # Test adding a new value
        cb = Callback(self, added={"f": "fig"})
        bar = MyClass(cb)
        self.assertTrue(bar.d.setdefault("f", "fig") == "fig")
        self.assertTrue(cb.called)
        return
    def test_pop(self):
        # Test popping a non-existent key
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.pop("x", "dummy"), "dummy")
        self.assertFalse(cb.called)
        # Test popping a regular item
        cb = Callback(self, removed={"c": "cherry"})
        bar = MyClass(cb)
        self.assertEqual(bar.d.pop("c"), "cherry")
        self.assertTrue(cb.called)
        return
    def test_popitem(self):
        # Reduce the dict to a single known entry so popitem() is predictable;
        # the callback is attached only after the setup mutations.
        foo = MyClass(None)
        foo.d.clear()
        foo.d["x"] = "xylophone"
        cb = Callback(self, removed={"x": "xylophone"})
        foo.callback = cb
        self.assertEqual(foo.d.popitem(), ("x", "xylophone"))
        self.assertTrue(cb.called)
        return
    def test_dynamic_listener(self):
        # Listeners attached via on_trait_change (no static handler available).
        foo = MyOtherClass()
        # Test adding
        func = Callback(self, added={"g": "guava"})
        foo.on_trait_change(func.__call__, "d_items")
        foo.d["g"] = "guava"
        foo.on_trait_change(func.__call__, "d_items", remove=True)
        self.assertTrue(func.called)
        # Test removing
        func2 = Callback(self, removed={"a": "apple"})
        foo.on_trait_change(func2.__call__, "d_items")
        del foo.d["a"]
        foo.on_trait_change(func2.__call__, "d_items", remove=True)
        self.assertTrue(func2.called)
        # Test changing
        func3 = Callback(self, changed={"b": "banana"})
        foo.on_trait_change(func3.__call__, "d_items")
        foo.d["b"] = "broccoli"
        foo.on_trait_change(func3.__call__, "d_items", remove=True)
        self.assertTrue(func3.called)
        return
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
859947adf97a94df19974a51555ff9c2bc6505e4 | 601fd7ba7f0f1064dd92e8b4cdc1262cb7b75363 | /dst/data/decoding.py | 9c7c09fddc053f07d433683b0c2e994618a7ecae | [
"MIT"
] | permissive | shmpanski/deep-summarization-toolkit | c5a8e674376b549cbc77cf3ed84693ce73455a0c | e249b7c31c817fedbc3133a3799c23a0115091bd | refs/heads/master | 2022-08-28T17:53:50.429286 | 2019-05-18T16:40:35 | 2019-05-18T16:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | from typing import List, Tuple
import torch
class BeamSearch:
    """Beam-search decoder over step-wise vocabulary distributions.

    Args:
        k (int): Beam size.
        device (str, optional): Defaults to 'cpu'. Selected device.
    """

    def __init__(self, k: int, device='cpu'):
        self.k = k
        self.device = torch.device(device)
        self.scores = None
        self.sequences = None

    def initial_update(self, probs: torch.FloatTensor):
        """Seed the beam from the first-step distribution of shape ``(vocab_size,)``."""
        log_probs = torch.log(probs)
        best_scores, best_tokens = log_probs.topk(self.k)
        self.sequences = [[tok.item()] for tok in best_tokens]
        self.scores = best_scores.view(self.k, 1)

    def update(self, probs: torch.FloatTensor):
        """Advance the beam one step.

        Args:
            probs (torch.FloatTensor): Probability distribution of vocabulary for
                each beam, shape ``(k, vocab_size)``. For the initial update the
                shape must be ``(vocab_size, )``.
        """
        if self.scores is None:
            assert len(probs.shape) == 1, "Initial update must be done with single-beam prob distribution"
            self.initial_update(probs)
            return
        assert len(probs.shape) == 2, "Update probs must be a matrix of sizes ``(k, vocab)``"
        assert probs.shape[0] == self.k, "Update must be done with k-beam prob distribution"
        # Cumulative log-score of every (beam, token) candidate.
        candidate_scores = (self.scores + torch.log(probs)).detach().cpu()
        # Best k tokens within each beam, then the global best k among those.
        per_beam_scores, per_beam_tokens = candidate_scores.topk(self.k)
        beam_origin = torch.arange(self.k).view(self.k, 1).repeat(1, self.k)
        flat_scores = per_beam_scores.view(-1)
        flat_tokens = per_beam_tokens.view(-1)
        flat_origin = beam_origin.view(-1)
        best_scores, best_idx = flat_scores.topk(self.k)
        best_tokens = flat_tokens.take(best_idx)
        best_origin = flat_origin.take(best_idx)
        # Extend each winning beam's history with its chosen token.
        self.sequences = [
            self.sequences[origin] + [token.item()]
            for origin, token in zip(best_origin, best_tokens)
        ]
        self.scores = best_scores.view(self.k, 1).to(self.device)

    def search(self) -> torch.LongTensor:
        """Find best ``k`` sequence.

        Returns:
            torch.LongTensor: Decoded sequences.
        """
        return torch.LongTensor(self.sequences).to(self.device)
| [
"gooppe@yandex.ru"
] | gooppe@yandex.ru |
e835d7a6a7ef6f8e1aa01094b3b6f829c0c432b1 | e9880afeafb0ebc5d2ab4ea6e5a8977fa129d3af | /client/spectre_client/client.py | 846413c26a681780b772c536dd031675d618b582 | [] | no_license | nksaraf/spectre | c0714e9bdfdac582dcb40b2ed71a6d485d32d69b | fd9870fa1d187507b8b274fcd24497b3460687bf | refs/heads/master | 2021-09-28T14:07:57.675543 | 2018-11-17T22:24:16 | 2018-11-17T22:24:16 | 91,722,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | import sys
import socket
import select
import json
from constants import *
import utils
import error
class Client():
def __init__(self, name, role, address, handler):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.connect(address)
self.name = name
self.properties = {}
self.properties["name"] = name
self.properties["type"] = role
self.handler = handler
self.id()
def id(self):
while True:
self.send_data(Action.ID, "")
data = self.get_data()
if data["action"] == ServerAction.ID and data["status"] == 'OK':
print('{}: [{}] {}'.format(data["name"], data["status"], data["content"]))
break
else:
time.sleep(2)
def run(self):
raise NotImplementedError()
def get_data(self):
length = None
buf = ""
while True:
data = self.socket.recv(BUF_SIZE)
if not data:
raise error.ConnectionClosedError()
buf += str(data, 'utf-8')
while True:
if length is None:
if '#' not in buf:
break
length_str, ign, buf = buf.partition('#')
length = int(length_str)
if len(buf) < length:
break
try:
return json.loads(buf[:length])
except:
return buf[:length]
def send_data(self, action, data):
to_send = {}
to_send["action"] = action
to_send["content"] = data
to_send = dict(to_send, **self.properties)
self.socket.send(bytes(utils.proto_string(json.dumps(to_send)), 'utf-8'))
if __name__ == '__main__':
try:
client = Client('nikhil', 'user', ADDRESS, None)
client.run()
except (KeyboardInterrupt, SystemExit):
client.socket.close()
sys.exit(0) | [
"nsaraf98@gmail.com"
] | nsaraf98@gmail.com |
84cf97d26dd11ddf99377620de38ad861555c852 | d30ff935c61bdc748db551f82979c79867c02e3a | /demo_28.py | 592603c5c4d1ac90e598e56f9e8afe96000fdbc0 | [] | no_license | chenyongzhen-oneShallot/Demo-python | 55f17ae2d769dd3e01c308495f5a391954bd3cdb | 2a8936d4a763132c43fa3c106abf0d95f68c94e7 | refs/heads/master | 2021-09-18T04:21:36.509158 | 2018-07-09T14:56:42 | 2018-07-09T14:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | #coding=utf-8
list=range(1,10)
print([x*x for x in list])
list1=['A','B','C']
list2=['X','Y','Z']
print([m+n for m in list1 for n in list2])
print([x*x for x in list if x%2==0] ) | [
"891354032@qq.com"
] | 891354032@qq.com |
046841638569002374bf14f9e0d8ce60319efd78 | 3e498ff1fe84720875d4af76897bb8dd5a4d2136 | /diary/jsk/17Oct_wk34/positive_pruning.py | b01133fddfc037f4de6d9f04534b6c1f155eaebf | [] | no_license | jehoons/sbie_aging | 24dcf56cbcdfb447b2c3c77c6400f94557f439c1 | 19c2e2e4f4a823a89594f3ea782eb68ed0e08b55 | refs/heads/master | 2021-01-11T18:05:23.538529 | 2020-09-28T06:58:51 | 2020-09-28T06:58:51 | 79,891,078 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,763 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
miraw = pd.read_csv('MI_raw.csv', index_col=0)
agingnetwork = pd.read_csv('Aging_network.csv')
abmap = pd.read_csv('antibody_mapping.csv', index_col=0)
#a/f feedback
#plotting the relationship between mutual information cutoff and the number of interactions
colist=[]
nlist=[]
for cutoff in range(12, 27):
cutoff = cutoff/10
hicormi = miraw > cutoff
n = sum(np.array(hicormi).flatten().astype(int))
colist.append(cutoff)
nlist.append(n)
plt.plot(colist, nlist)
plt.xlabel('cutoff')
plt.ylabel('MI pairs')
#prune selected interactions based on the appropriate cutoffs
#miraw > 2.4
hicormi24 = miraw > 2.4
n24 = sum(np.array(hicormi24).flatten().astype(int))
print('miraw > 2.4, ' + str(n24))
himi24 = []
for i in range(len(hicormi24)):
for j in range(len(hicormi24)):
if hicormi24.iloc[i][j]:
print(hicormi24.index[i], hicormi24.columns.values[j])
himi24.append([hicormi24.index[i], hicormi24.columns.values[j], miraw.iloc[i][j]])
#miraw > 2.2
hicormi22 = miraw > 2.2
n22 = sum(np.array(hicormi22).flatten().astype(int))
print('miraw > 2.2, ' + str(n22))
himi22 = []
for i in range(len(hicormi22)):
for j in range(len(hicormi22)):
if hicormi22.iloc[i][j]:
print(hicormi22.index[i], hicormi22.columns.values[j])
himi22.append([hicormi22.index[i], hicormi22.columns.values[j], miraw.iloc[i][j]])
#miraw > 2.0
hicormi20 = miraw > 2.0
n20 = sum(np.array(hicormi20).flatten().astype(int))
print('miraw > 2.0, ' + str(n20))
himi20 = []
for i in range(len(hicormi20)):
for j in range(len(hicormi20)):
if hicormi20.iloc[i][j]:
print(hicormi20.index[i], hicormi20.columns.values[j])
himi20.append([hicormi20.index[i], hicormi20.columns.values[j], miraw.iloc[i][j]])
#miraw > 1.8
hicormi18 = miraw > 1.8
n18 = sum(np.array(hicormi18).flatten().astype(int))
print('miraw > 1.8, ' + str(n18))
himi18 = []
for i in range(len(hicormi18)):
for j in range(len(hicormi18)):
if hicormi18.iloc[i][j]:
print(hicormi18.index[i], hicormi18.columns.values[j])
himi18.append([hicormi18.index[i], hicormi18.columns.values[j], miraw.iloc[i][j]])
#miraw > .1; original
hicormiori = miraw > .1
nori = sum(np.array(hicormiori).flatten().astype(int))
print('pruned with all data, ' + str(nori))
himiori = []
for i in range(len(hicormiori)):
for j in range(len(hicormiori)):
if hicormiori.iloc[i][j]:
print(hicormiori.index[i], hicormiori.columns.values[j])
himiori.append([hicormiori.index[i], hicormiori.columns.values[j], miraw.iloc[i][j]])
#prune existing PKN using the pruned interaction information
agingnetwork24 = []
for sigmi in himi24:
if abmap['include network'][sigmi[0]] == 'o' and abmap['include network'][sigmi[1]] == 'o':
for idx in agingnetwork.index:
if abmap['symbol'][sigmi[0]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[1]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork24:
agingnetwork24.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
elif abmap['symbol'][sigmi[1]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[0]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork24:
agingnetwork24.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
agingnetwork22 = []
for sigmi in himi22:
if abmap['include network'][sigmi[0]] == 'o' and abmap['include network'][sigmi[1]] == 'o':
for idx in agingnetwork.index:
if abmap['symbol'][sigmi[0]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[1]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork22:
agingnetwork22.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
elif abmap['symbol'][sigmi[1]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[0]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork22:
agingnetwork22.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
agingnetwork20 = []
for sigmi in himi20:
if abmap['include network'][sigmi[0]] == 'o' and abmap['include network'][sigmi[1]] == 'o':
for idx in agingnetwork.index:
if abmap['symbol'][sigmi[0]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[1]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork20:
agingnetwork20.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
elif abmap['symbol'][sigmi[1]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[0]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork20:
agingnetwork20.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
agingnetwork18 = []
for sigmi in himi18:
if abmap['include network'][sigmi[0]] == 'o' and abmap['include network'][sigmi[1]] == 'o':
for idx in agingnetwork.index:
if abmap['symbol'][sigmi[0]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[1]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork18:
agingnetwork18.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
elif abmap['symbol'][sigmi[1]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[0]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetwork18:
agingnetwork18.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
agingnetworkori = []
for sigmi in himiori:
if abmap['include network'][sigmi[0]] == 'o' and abmap['include network'][sigmi[1]] == 'o':
for idx in agingnetwork.index:
if abmap['symbol'][sigmi[0]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[1]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetworkori:
agingnetworkori.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
elif abmap['symbol'][sigmi[1]] == agingnetwork['Source'][idx]:
if abmap['symbol'][sigmi[0]] == agingnetwork['Target'][idx]:
if not [agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]] in agingnetworkori:
agingnetworkori.append([agingnetwork['Source'][idx], agingnetwork['Regulation Type'][idx], agingnetwork['Target'][idx]])
#export the resulting network files to .sif format
agingnetwork24 = pd.DataFrame(agingnetwork24)
agingnetwork24.to_csv('agingnetwork24p.sif', sep='\t', index=None, header=None)
agingnetwork22 = pd.DataFrame(agingnetwork22)
agingnetwork22.to_csv('agingnetwork22p.sif', sep='\t', index=None, header=None)
agingnetwork20 = pd.DataFrame(agingnetwork20)
agingnetwork20.to_csv('agingnetwork20p.sif', sep='\t', index=None, header=None)
agingnetwork18 = pd.DataFrame(agingnetwork18)
agingnetwork18.to_csv('agingnetwork18p.sif', sep='\t', index=None, header=None)
agingnetworkori = pd.DataFrame(agingnetworkori)
agingnetworkori.to_csv('agingnetworkori.sif', sep='\t', index=None, header=None)
| [
"reality312@kaist.ac.kr"
] | reality312@kaist.ac.kr |
50be063895332b73baf258fc0eb8bb53092d1908 | 660a2d5f775c49a071fb7e9d61c1b4963bcbfd7e | /CatDog/test.py | 5d29f90be4cfd2f4a34a4249f298dd06fd5199cd | [] | no_license | WXiaoman/mytensorflow | eed0e684523069412bd88c411a1d429970a20564 | 7b838ade92485c2174389a0d48ad73e1ba59dce0 | refs/heads/master | 2020-04-28T15:31:54.489356 | 2019-05-16T03:14:56 | 2019-05-16T03:14:56 | 175,377,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | import tensorflow as tf
import numpy as np
import os, cv2
image_size = 64
num_channels = 3
images = []
path = "D:/anicode/spyderworkspace/catordog/testing_data"
direct = os.listdir(path)
for file in direct:
image = cv2.imread(path + '/' + file)
print("adress:", path + '/' + file)
image = cv2.resize(image, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0 / 255.0)
for img in images:
x_batch = img.reshape(1, image_size, image_size, num_channels)
sess = tf.Session()
# step1网络结构图
saver = tf.train.import_meta_graph('./dogs-cats-model/dog-cat.ckpt-7975.meta')
# step2加载权重参数
saver.restore(sess, './dogs-cats-model/dog-cat.ckpt-7975')
# 获取默认的图
graph = tf.get_default_graph()
y_pred = graph.get_tensor_by_name("y_pred:0")
x = graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
y_test_images = np.zeros((1, 2))
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result = sess.run(y_pred, feed_dict_testing)
res_label = ['dog', 'cat']
print(res_label[result.argmax()])
| [
"wxm2418341934@163。com"
] | wxm2418341934@163。com |
0330944a234507649832eb94badabbf3a9353faf | 5a9194df7e40b1f9694576c88c536b24d22f548b | /tests/projects/test_delete_project.py | 96d89108aa6b51da09f660ffdc8b7e8fd51e6b38 | [] | no_license | jamesstidard/Talk-Zoho | 17230611e40e5c232dcd33bdbd5148ba20543810 | 3a918d72146dae1ed6bb8afee09dfe658a540048 | refs/heads/master | 2021-05-08T10:44:05.881154 | 2017-03-03T16:49:34 | 2017-03-03T16:49:34 | 119,862,940 | 0 | 0 | null | 2018-02-01T16:34:38 | 2018-02-01T16:34:38 | null | UTF-8 | Python | false | false | 383 | py | import pytest
from tests.projects.fixtures import * # noqa
@pytest.mark.gen_test
def test_cant_delete_user(projects, portal_id):
# Deleting user with wrong id always returns true (CRM API limitation)
# Pull projects down to lowest common denominator for unified interface.
success = yield projects.projects.delete('123456789', portal_id=portal_id)
assert success
| [
"jamesstidard@gmail.com"
] | jamesstidard@gmail.com |
1963788b916b4fec844fe1d1523a7cfee98a0955 | bd109656f1ea18fe2eae9afffcc0074d75826bb9 | /setup.py | 55a44300afd9c9ab3918a9fb2a7ad146c8367a9b | [
"MIT"
] | permissive | ffreemt/baidu-tr-async-free | 4db5356e24e1ac818a6f641ccad7093113dd32ec | 3bf422e8d8406123479c5bcdb679af795db0ba8f | refs/heads/master | 2021-01-02T21:41:05.188476 | 2020-02-12T06:15:30 | 2020-02-12T06:15:30 | 239,812,578 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | ''' setup '''
# pylint: disable=invalid-name
from pathlib import Path
import re
from setuptools import setup, find_packages
name = """baidu-tr-async-free"""
description = 'baidu translate for free with async and proxy support'
dir_name, *_ = find_packages()
# dir_name = 'bdtr_async'
curr_dir = Path(__file__).parent
_ = Path(f'{dir_name}/__init__.py').read_text(encoding='utf-8')
version, *_ = re.findall(r"__version__\W*=\W*'([^']+)'", _)
targz = 'v_' + version.replace('.', '') + '.tar.gz'
install_requires = ['httpx', 'loguru', 'google-sign']
README_rst = f'{curr_dir}/README.md'
long_description = (
open(README_rst, encoding='utf-8').read() if Path(README_rst).exists() else ''
)
setup(
name=name,
packages=find_packages(),
version=version,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
keywords=['machine translation', 'free', 'sign'],
author="mikeee",
url=fr'http://github.com/ffreemt/{name}',
download_url=fr'https://github.com/ffreemt/{name}/archive/' + targz,
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
license='MIT License',
)
| [
"yucongo+fmt@gmail.com"
] | yucongo+fmt@gmail.com |
717113e4b09675684d9bf7407e560163fbcb0dc1 | 6e4783a050e438b0b54e4a3f560faa8d97d7197a | /alibi_detect/cd/tensorflow/classifier.py | 8fdbfd8456be3dcef232b8256895a34853e642e3 | [
"Apache-2.0"
] | permissive | chaitalibodke/alibi-detect | a68641ab6c1269074e6f6f9acce1463a2c07d40b | 5927d5b4bd8a4f7209cd42db344145f734074d6b | refs/heads/master | 2023-08-19T05:42:49.233295 | 2021-09-29T09:59:29 | 2021-09-29T09:59:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,153 | py | from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
from scipy.special import softmax
from typing import Callable, Dict, Optional, Tuple
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow.misc import clone_model
from alibi_detect.utils.tensorflow.prediction import predict_batch
class ClassifierDriftTF(BaseClassifierDrift):
def __init__(
self,
x_ref: np.ndarray,
model: tf.keras.Model,
p_val: float = .05,
preprocess_x_ref: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'preds',
binarize_preds: bool = False,
reg_loss_fn: Callable = (lambda model: 0),
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
dataset: Callable = TFDataset,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
TensorFlow classification model used for drift detection.
p_val
p-value used for the significance of the test.
preprocess_x_ref
Whether to already preprocess and store the reference data.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' or 'logits'
binarize_preds
Whether to test for discrepency on soft (e.g. prob/log-prob) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
reg_loss_fn
The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
on all the out-of-fold predictions. This allows to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier.
0 is silent, 1 a progress bar and 2 prints the statistics after each epoch.
train_kwargs
Optional additional kwargs when fitting the classifier.
dataset
Dataset object used during training.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
preprocess_x_ref=preprocess_x_ref,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
preds_type=preds_type,
binarize_preds=binarize_preds,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
data_type=data_type
)
self.meta.update({'backend': 'tensorflow'})
# define and compile classifier model
self.original_model = model
self.model = clone_model(model)
self.loss_fn = BinaryCrossentropy(from_logits=(self.preds_type == 'logits'))
self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
self.predict_fn = partial(predict_batch, preprocess_fn=preprocess_batch_fn, batch_size=batch_size)
self.train_kwargs = {'optimizer': optimizer(learning_rate=learning_rate), 'epochs': epochs,
'reg_loss_fn': reg_loss_fn, 'preprocess_fn': preprocess_batch_fn, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray]:
"""
Compute the out-of-fold drift metric such as the accuracy from a classifier
trained to distinguish the reference data from the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value, a notion of distance between the trained classifier's out-of-fold performance
and that which we'd expect under the null assumption of no drift,
and the out-of-fold classifier model prediction probabilities on the reference and test data
"""
x_ref, x = self.preprocess(x)
n_ref, n_cur = len(x_ref), len(x)
x, y, splits = self.get_splits(x_ref, x)
# iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
preds_oof_list, idx_oof_list = [], []
for idx_tr, idx_te in splits:
y_tr = np.eye(2)[y[idx_tr]]
if isinstance(x, np.ndarray):
x_tr, x_te = x[idx_tr], x[idx_te]
elif isinstance(x, list):
x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
ds_tr = self.dataset(x_tr, y_tr)
self.model = clone_model(self.original_model) if self.retrain_from_scratch \
else self.model
train_args = [self.model, self.loss_fn, None]
self.train_kwargs.update({'dataset': ds_tr})
trainer(*train_args, **self.train_kwargs) # type: ignore
preds = self.predict_fn(x_te, self.model)
preds_oof_list.append(preds)
idx_oof_list.append(idx_te)
preds_oof = np.concatenate(preds_oof_list, axis=0)
probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
idx_oof = np.concatenate(idx_oof_list, axis=0)
y_oof = y[idx_oof]
p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
probs_sort = probs_oof[np.argsort(idx_oof)]
return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1]
| [
"noreply@github.com"
] | chaitalibodke.noreply@github.com |
2ca63a6a91b03fa8268acf13de70a25c244695e2 | 5bcbb0bc8e861d6915d3f4ba88a4cd494d5482e5 | /odd_even.py | 8ba735b8ee3251299df38d50f08b2471dd2538a3 | [] | no_license | MyronCodeBase/odd_even | 6c14bff3066ec2aa18f3ff292600e19e5d5d3a01 | 6537fc78d5c4d3d144c745f9ba019af9df14cbb8 | refs/heads/master | 2021-08-26T09:26:39.658299 | 2017-11-22T23:14:34 | 2017-11-22T23:14:34 | 111,739,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | def odd_even():
num = int(input("Enter a number "))
if num % 2 == 0:
print("The number entered is even")
else:
print("The number entered is odd")
if num % 4 == 0:
print("The number is also a multiple of 4!")
check = int(input("Enter another number "))
if num % check == 0:
print("Numbers divides evenly!")
else:
print("Numbers don't divide evenly")
odd_even() | [
"noreply@github.com"
] | MyronCodeBase.noreply@github.com |
8dc0074c227b31ff2bf80b4b99499cad9ce4f219 | a16ed9d63ac32a0e88bd66f963039b64e209e4a7 | /tests/test_template.py | d059e847464a28dbb47a1cf4977449347a4c1a86 | [
"MIT"
] | permissive | tiffon/take | 22683d55556016664d961c92e88525543af6f937 | 907a2c4a72f5cbd357eadd4837fa4cae23647096 | refs/heads/master | 2022-07-31T20:43:19.104407 | 2015-07-27T16:49:43 | 2015-07-27T16:49:43 | 32,710,551 | 7 | 1 | MIT | 2022-07-06T19:15:33 | 2015-03-23T04:12:49 | Python | UTF-8 | Python | false | false | 17,739 | py | import os
import pytest
from pyquery import PyQuery
from take import TakeTemplate
from take.parser import InvalidDirectiveError, UnexpectedTokenError, TakeSyntaxError
from take.scanner import ScanError
here = os.path.dirname(os.path.abspath(__file__))
with open(here + '/doc.html') as f:
html_fixture = f.read()
pq_doc = PyQuery(html_fixture)
@pytest.mark.basic
class TestBaseFunctionality():
def test_template_compiles(self):
TMPL = """
$ h1 | text
save: value
"""
tt = TakeTemplate(TMPL)
assert tt
def test_save(self):
TMPL = """
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'].html() == pq_doc.html()
def test_save_alias(self):
TMPL = """
: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'].html() == pq_doc.html()
def test_deep_save(self):
TMPL = """
save: parent.value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['parent']['value'].html() == pq_doc.html()
def test_deep_save_alias(self):
TMPL = """
: parent.value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['parent']['value'].html() == pq_doc.html()
def test_save_css_query(self):
TMPL = """
$ h1
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'].html() == pq_doc('h1').html()
def test_save_css_query_hard_tabs(self):
TMPL = """
\t\t\t$ h1
\t\t\t\tsave: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'].html() == pq_doc('h1').html()
def test_save_css_text_query(self):
TMPL = """
$ h1 | text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': 'Text in h1'}
def test_save_css_index_query(self):
TMPL = """
$ a | 0
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'].html() == pq_doc('a').eq(0).html()
def test_save_css_index_text_query(self):
TMPL = """
$ a | 0 text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == 'first nav item'
def test_absent_index(self):
TMPL = """
$ notpresent | 0 text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == ''
def test_neg_index(self):
TMPL = """
$ a | -1 text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == 'second content link'
def test_absent_neg_index(self):
TMPL = """
$ notpresent | -1 text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == ''
def test_query_deep_save(self):
TMPL = """
$ h1 | text
save: deep.value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'deep': {'value': 'Text in h1'}}
def test_save_attr(self):
TMPL = """
$ h1 | [id]
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == 'id-on-h1'
def test_save_absent_attr(self):
TMPL = """
$ h1 | [mia]
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data['value'] == None
def test_sub_ctx_save(self):
TMPL = """
$ section
$ ul | [id]
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': 'second-ul'}
def test_sub_ctx_save_hard_tabs(self):
TMPL = """
\t\t\t$ section
\t\t\t\t\t$ ul\t|\t\t[id]
\t\t\t\t\t\t\tsave: \t \tvalue
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': 'second-ul'}
def test_sub_ctx_save_alias(self):
TMPL = """
$ section
$ ul | [id]
: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': 'second-ul'}
def test_sub_ctx_save_empty(self):
TMPL = """
$ nav
$ ul | 1 [id]
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': None}
def test_sub_ctx_save_alias_empty(self):
TMPL = """
$ nav
$ ul | 1 [id]
: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': None}
def test_exit_sub_ctx_save(self):
TMPL = """
$ nav
$ ul | 0 [id]
save: sub_ctx_value
$ p | text
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'sub_ctx_value': 'first-ul',
'value': 'some description'}
def test_exit_sub_ctx_save_alias(self):
TMPL = """
$ nav
$ ul | 0 [id]
: sub_ctx_value
$ p | text
: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'sub_ctx_value': 'first-ul',
'value': 'some description'}
def test_comments(self):
TMPL = """
# shouldn't affect things
$ nav
# shouldn't affect things
$ ul | 0 [id]
# shouldn't affect things
# shouldn't affect things
save: sub_ctx_value
# shouldn't affect things
$ p | text
# shouldn't affect things
save: value
# shouldn't affect things
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'sub_ctx_value': 'first-ul',
'value': 'some description'}
def test_comments_id_selector(self):
TMPL = """
$ #id-on-h1 | [id]
save: value
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'value': 'id-on-h1'}
def test_save_each(self):
TMPL = """
$ nav
$ a
save each: nav
| [href]
save: url
| text
save: text
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
expect = {
'nav': [{
'url': '/local/a',
'text': 'first nav item'
},{
'url': '/local/b',
'text': 'second nav item'
}
]
}
assert data == expect
def test_deep_save_each(self):
TMPL = """
$ nav
$ a
save each: nav.items
| [href]
save: item.url
| text
save: item.text
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
expect = {
'nav': {
'items': [{
'item': {
'url': '/local/a',
'text': 'first nav item'
}
},{
'item': {
'url': '/local/b',
'text': 'second nav item'
}
}
]
}
}
assert data == expect
def test_base_url(self):
TMPL = """
$ a | 0 [href]
save: local
$ a | -1 [href]
save: ext
"""
tt = TakeTemplate(TMPL)
data = tt(html_fixture)
assert data == {'local': '/local/a',
'ext': 'http://ext.com/b'}
data = tt(html_fixture, base_url='http://www.example.com')
assert data == {'local': 'http://www.example.com/local/a',
'ext': 'http://ext.com/b'}
def test_base_url_on_tmpl(self):
TMPL = """
$ a | 0 [href]
save: local
$ a | -1 [href]
save: ext
"""
tt = TakeTemplate(TMPL, base_url='http://www.example.com')
data = tt(html_fixture)
assert data == {'local': 'http://www.example.com/local/a',
'ext': 'http://ext.com/b'}
@pytest.mark.invalid_templates
class TestInvalidTemplates():
def test_invalid_directive_statement_error(self):
TMPL = """
$ h1 | [href]
save fail
"""
with pytest.raises(InvalidDirectiveError):
tt = TakeTemplate(TMPL)
def test_invalid_directive_id_error(self):
TMPL = """
$ h1 | [href]
hm: fail
"""
with pytest.raises(InvalidDirectiveError):
tt = TakeTemplate(TMPL)
def test_invalid_query_error(self):
TMPL = """
.hm | [href]
hm: fail
"""
with pytest.raises(InvalidDirectiveError):
tt = TakeTemplate(TMPL)
def test_attr_text_error(self):
TMPL = """
$ h1 | [href] text
save: fail
"""
with pytest.raises(UnexpectedTokenError):
tt = TakeTemplate(TMPL)
def test_invalid_save_each_context(self):
TMPL = """
$ li
save each: items
$ h1
save: fail
"""
with pytest.raises(TakeSyntaxError):
tt = TakeTemplate(TMPL)
@pytest.mark.inline_ctx
class TestInlineSubCtx():
    """Inline sub-contexts: ";" chains accessors/directives on a single line,
    equivalently to nesting them on separate (indented) lines.

    NOTE(review): take templates are indentation-sensitive, but the literal
    indentation inside the TMPL strings appears to have been stripped from
    this copy of the file -- restore it from upstream before trusting them.
    """

    def test_css_sub_ctx_save(self):
        TMPL = """
$ h1 | 0 text ; save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_css_sub_ctx_save_alias_nested(self):
        # ":" is shorthand for "save:"; a dotted name nests the saved value.
        TMPL = """
$ h1 | 0 text ; : parent.value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['parent']['value'] == 'Text in h1'

    def test_accessor_sub_ctx_save(self):
        TMPL = """
$ h1
| 0 text ; save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_multiple_inline_sub_ctx(self):
        TMPL = """
$ h1 ; | 0 ; | text ; : value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_sub_ctx_of_inline_sub_ctx(self):
        TMPL = """
$ h1 ; | 0 ; | text
: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_exits_sub_ctx_of_inline_sub_ctx(self):
        TMPL = """
$ h1 ; | 0 ; | text
: h1_value
$ p | text
: p_value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['h1_value'] == 'Text in h1'
        assert data['p_value'] == 'some description'

    def test_hard_tabs_w_inline_sub_ctxs(self):
        # Hard tabs around ";" and before directive arguments are accepted.
        TMPL = """
$ h1 ;\t\t| 0 ;\t\t| text
:\t\t\t\t\t\t\t\th1_value
$ p | text
:\t\t\t\t\t\t\t\tp_value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['h1_value'] == 'Text in h1'
        assert data['p_value'] == 'some description'
@pytest.mark.field_accessor
class TestFieldAccessor():
    """The "| .field" accessor reads a field out of a sub-template's result;
    absent fields (at any depth) yield None rather than raising.

    NOTE(review): take templates are indentation-sensitive, but the literal
    indentation inside the TMPL strings appears to have been stripped from
    this copy of the file -- restore it from upstream before trusting them.
    """

    def test_basic_field_accessor(self):
        TMPL = """
def: simple
$ h1 | 0 text
save: def_value
simple
| .def_value
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_basic_field_accessor_w_hard_tabs(self):
        # Same as above, but separated by hard tabs instead of spaces.
        TMPL = """
def: simple
$\th1\t|\t0\ttext
save\t:\tdef_value
simple
|\t.def_value
save:\tvalue
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'Text in h1'

    def test_deep_field_accessor(self):
        # Dotted accessors walk nested results produced by dotted saves.
        TMPL = """
def: simple
$ h1 | 0 text
save: item.def_value
simple
save : raw_result
simple
| .item.def_value
save : value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['raw_result']['item']['def_value'] == 'Text in h1'
        assert data['value'] == 'Text in h1'

    def test_absent_field(self):
        TMPL = """
def: simple
$ h1 | 0 text
save: def_value
simple
| .not_there
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == None

    def test_deep_absent_field(self):
        TMPL = """
def: simple
$ h1 | 0 text
save: def_value
simple
| .very.not_there
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == None
@pytest.mark.own_text_accessor
class TestOwnTextAccessor():
    """The own_text accessor returns only the element's direct text,
    excluding text belonging to child elements, while "text" returns all of
    it -- per the expected values asserted below.

    NOTE(review): take templates are indentation-sensitive, but the literal
    indentation inside TMPL appears to have been stripped from this copy of
    the file -- restore it from upstream before trusting it.
    """

    def test_basic_own_text(self):
        TMPL = """
$ #not-all-own-text
| text ; save: full_text
| own_text ; save: own_text
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['full_text'] == 'own text not own text more own text'
        assert data['own_text'] == 'own text more own text'
@pytest.mark.regexp
class TestRegexpQuery():
    """Regexp queries: `...` holds a terse single-line pattern, a ``` fence
    holds a verbose pattern; "rx match" applies it, and numeric accessors
    read capture groups (0 = whole match, 1.. = groups).

    NOTE(review): take templates are indentation-sensitive, but the literal
    indentation inside the TMPL strings appears to have been stripped from
    this copy of the file -- restore it from upstream before trusting them.
    """

    def test_basic_terse_regexp(self):
        TMPL = """
$ h1 | 0 text
`in \w+`
rx match
| 0
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'in h1'

    def test_terse_regexp_capture_groups(self):
        TMPL = """
$ h1 | 0 text
`in (\w+)`
rx match
| 0
save: all_match
| 1
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['all_match'] == 'in h1'
        assert data['value'] == 'h1'

    def test_terse_regexp_custom_accessor(self):
        # "accessor:" names a reusable pattern; "set context" scopes it for
        # later use by name ("in stuff").
        TMPL = """
accessor: in stuff
`in (\w+)`
rx match
set context
$ h1 | 0 text
in stuff
| 0
save: all_match
| 1
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['all_match'] == 'in h1'
        assert data['value'] == 'h1'

    def test_basic_verbose_regexp(self):
        # Verbose form: one pattern fragment per line inside the ``` fence.
        TMPL = """
$ h1 | 0 text
```
in
\s
\w+
```
rx match
| 0
save: value
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        assert data['value'] == 'in h1'

    def test_verbose_url_regexp(self):
        # Splits each href into (protocol)(://)(domain)(/page?) via groups.
        TMPL = """
accessor: url parts
```
(https?)
(://)
([^/]+)
(?:/(.+))?
```
set context
$ #second-ul a
save each : urls
| [href]
url parts
rx match
| 1 ; : protocol
| 3 ; : domain
| 4 ; : page
"""
        tt = TakeTemplate(TMPL)
        data = tt(html_fixture)
        expect = [
            {
                'protocol': 'http',
                'domain': 'ext.com',
                'page': 'a'
            },
            {
                'protocol': 'http',
                'domain': 'ext.com',
                'page': 'b'
            }
        ]
        assert data['urls'] == expect
| [
"joe@jf.io"
] | joe@jf.io |
a1c9abf51f0470a5526a879436bdaedaf39da7ac | fd8833407c00c6cdcdc82623c8b5d4c1fe28da2a | /practicum-1/tasks/task5.py | c009cb66e77638f64a48c449c5472fa705a533fb | [] | no_license | kbalog/uis-dat630-fall2016 | b4eb303ac3d50bb50d764591fb9a9d44f95bb5ae | 5afd4f9e7653b0d148a1c57e3bba84363bf8149c | refs/heads/master | 2020-07-04T05:01:43.001414 | 2016-11-23T11:35:41 | 2016-11-23T11:35:41 | 66,935,163 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | # Computing summary statistics on the Iris dataset
# ================================================
# Task
# ----
# - Load the Iris dataset (`data/iris.data`)
# - Answer the following questions:
# * What is the mean `sepal length` for Iris Setosa?
# * What is the median `petal length` for Iris Virginica?
# * What is the range of `sepal width` for Iris Versicolour?
# * Which class (Setosa/Versicolour/Virginica) shows the highest variance in `petal width`?
# * What is the 70% percentile for `sepal length` and `sepal width` (for all classes together)?
# * Compute Absolute Average Deviation (AAD), Median Absolute Deviation (MAD), and Interquartile
# Range (IQR) for `petal length` (for all classes together).
# Hint: you can exploit the fact that the input is ordered by class: the first 50 records are Iris Setosa,
# records 51-100 are Iris Versicolour, and records 101-150 are Iris Virginica.
# Solution
# --------
# We will use the **csv** module for reading in data from a file.
import csv
# The data set is stored in a comma-separated text file.
# We read it and store it as a list of records, where each record is represented using a dict.
def load_iris_data(filename):
    """Parse the Iris CSV file into a list of per-flower record dicts.

    Each valid line has exactly 5 comma-separated fields: four numeric
    measurements followed by the class label. Blank or malformed lines are
    silently skipped.
    """
    numeric_fields = ("sepal_length", "sepal_width", "petal_length", "petal_width")
    records = []
    with open(filename, 'rt') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            # A valid record line has exactly 5 fields; skip anything else
            # (this also drops the empty rows produced by blank lines).
            if len(row) != 5:
                continue
            record = {name: float(value)
                      for name, value in zip(numeric_fields, row[:4])}
            record["class"] = row[4]
            records.append(record)
    return records
# Load the full data set once; every question below is answered from this list.
# NOTE(review): the path is relative to the current working directory, so the
# script must be run from inside the practicum's script folder.
iris_data = load_iris_data("../data/iris.data")
# Hints:
# Get a slice of the list, e.g., all Iris Versicolour records: iris_data[50:100]
# Get a given attribute as a list, e.g., sepal width: attr = [x['sepal_width'] for x in iris_data]
| [
"krisztian.balog@uis.no"
] | krisztian.balog@uis.no |
class Counters(object):
    """Case-insensitive named integer counters with an aligned text report."""

    def __init__(self):
        # Maps lower-cased counter name -> accumulated integer value.
        self._counters = {}

    def inc(self, name, value=1):
        """Increase the counter called `name` (case-insensitive) by `value`."""
        key = name.lower()
        self._counters[key] = self._counters.get(key, 0) + value

    def clear(self):
        """Forget every counter."""
        self._counters = {}

    def format(self, padding=''):
        """Render '<padding><name> <value>' lines, sorted by name.

        Names are left-justified to the longest name so the values line up.
        Returns the empty string when no counter has been incremented yet.
        """
        if not self._counters:
            return ''
        names = sorted(self._counters)
        width = max(len(name) for name in names)
        lines = ['%s%s %d' % (padding, name.ljust(width), self._counters[name])
                 for name in names]
        return '\n'.join(lines)
| [
"horia141@gmail.com"
] | horia141@gmail.com |
9bc17966d5cc0678b54efbfe6068eb6d99105f03 | 6fae09c2b851c47c21157e371a0a38dbbcd3ca74 | /Y/google-cloud-sdk/lib/surface/container/clusters/get_credentials.py | 13de2c65761584442e0f03c1fcba34cfe9cd6340 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/sistemaMonitoreo | 50a4c05bace2be8a691130d2026d2e51d332a008 | c2328afd613864dc4ba1f296946fecf8c7e22843 | refs/heads/master | 2022-11-19T16:18:22.247779 | 2019-01-27T20:49:40 | 2019-01-27T20:49:40 | 282,366,646 | 0 | 0 | null | 2020-07-25T03:59:20 | 2020-07-25T03:59:19 | null | UTF-8 | Python | false | false | 4,549 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetch cluster credentials."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
NOT_RUNNING_MSG = '''\
cluster {0} is not running. The kubernetes API may not be available.'''
class GetCredentials(base.Command):
    """Fetch credentials for a running cluster.
    {command} updates a `kubeconfig` file with appropriate credentials and
    endpoint information to point `kubectl` at a specific cluster in Google
    Kubernetes Engine.
    It takes a project and a zone as parameters, passed through by set
    defaults or flags. By default, credentials are written to `HOME/.kube/config`.
    You can provide an alternate path by setting the `KUBECONFIG` environment
    variable. If `KUBECONFIG` contains multiple paths, the first one is used.
    This command enables switching to a specific cluster, when working
    with multiple clusters. It can also be used to access a previously created
    cluster from a new workstation.
    By default, {command} will configure kubectl to automatically refresh its
    credentials using the same identity as gcloud. If you are running kubectl as
    part of an application, it is recommended to use [application default
    credentials](https://cloud.google.com/docs/authentication/production).
    To configure a `kubeconfig` file to use application default credentials, set
    the container/use_application_default_credentials
    [Cloud SDK property](https://cloud.google.com/sdk/docs/properties) to true
    before running {command}
    See [](https://cloud.google.com/kubernetes-engine/docs/kubectl) for
    kubectl documentation.
    """

    detailed_help = {
        'EXAMPLES':
            """\
To switch to working on your cluster 'testcluster1', run:
$ {command} testcluster1 --zone=us-central1-f
""",
    }

    @staticmethod
    def Args(parser):
        """Register flags for this command.
        Args:
          parser: An argparse.ArgumentParser-like object. It is mocked out in order
            to capture some information, but behaves like an ArgumentParser.
        """
        # Positional cluster name; StoreProperty also records it in the
        # container/cluster property so later commands default to it.
        parser.add_argument(
            'name',
            help='Name of the cluster to get credentials for.',
            action=actions.StoreProperty(properties.VALUES.container.cluster))
        parser.add_argument(
            '--internal-ip',
            help='Whether to use the internal IP address of the cluster endpoint.',
            action='store_true')

    def Run(self, args):
        """This is what gets called when the user runs this command.
        Args:
          args: an argparse namespace. All the arguments that were provided to this
            command invocation.
        Raises:
          util.Error: if the cluster is unreachable or not running.
        """
        # kubectl must be present for the written kubeconfig to be usable.
        util.CheckKubectlInstalled()
        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)
        cluster_ref = adapter.ParseCluster(args.name, location)
        log.status.Print('Fetching cluster endpoint and auth data.')
        # Call DescribeCluster to get auth info and cache for next time
        cluster = adapter.GetCluster(cluster_ref)
        auth = cluster.masterAuth
        # TODO(b/70856999) Make this consistent with the checks in
        # api_lib/container/kubeconfig.py.
        missing_creds = not (auth and auth.clientCertificate and auth.clientKey)
        if missing_creds and not util.ClusterConfig.UseGCPAuthProvider():
            # Client-certificate auth was required but the API returned no
            # certs, which (per the message) means missing edit permission.
            raise util.Error(
                'get-credentials requires edit permission on {0}'.format(
                    cluster_ref.projectId))
        if not adapter.IsRunning(cluster):
            # Warn but continue: the kubeconfig entry is still written below.
            log.warning(NOT_RUNNING_MSG.format(cluster_ref.clusterId))
        # Side effect: writes/updates the kubeconfig entry for this cluster.
        util.ClusterConfig.Persist(cluster, cluster_ref.projectId,
                                   args.internal_ip)
| [
"guillermo.nunez@gnp.com.mx"
] | guillermo.nunez@gnp.com.mx |
c8e6f159a7813608572c6285f8a0b42c0a56fd09 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_213/61.py | cfe6796cc3346eddb991963bab839f4b05e02f73 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # link: https://code.google.com/codejam/contest/5314486/dashboard#s=1
import string
import time

# Python 2 script (print statements, xrange, string.strip, list-returning map).
# Which of the three input files to solve (0=example, 1=small, 2=large).
testIndex=2
problemRoot="d:/prog/versenyek/googlejam"
problemDir="2017/round2"
problemName="B"
inputFiles= ["-example.in", "-small.in", "-large.in"]
outputFiles=["-example.out", "-small.out", "-large.out"]

time1=time.time()
# Read every non-blank input line as a list of ints.
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+inputFiles[testIndex]
inputData=[map(int,line.split()) for line in open(fileName,'r') if line.strip()]
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+outputFiles[testIndex]
fileToWrite=open(fileName,'wb')
time2=time.time()

lineIdx=1
# inputData[0][0] is the number of cases; each case starts with "n c m"
# followed by m lines of two ints each.
for case in xrange(inputData[0][0]):
    n,c,m=inputData[lineIdx]
    # tick[i]: how many ticket lines name i+1 in their first field (1..n);
    # cost[j]: how many name j+1 in their second field (1..c).
    tick=[0]*n
    cost=[0]*c
    lineIdx+=1
    for i in xrange(m):
        tick[inputData[lineIdx+i][0]-1]+=1
        cost[inputData[lineIdx+i][1]-1]+=1
    lineIdx+=m
    rides=max(cost) # the most tickets held by a single second-field value
    # Raise `rides` so every prefix of tick fits: the expression
    # (fstk-1)/(i+1)+1 is ceil(fstk/(i+1)) in Python 2 integer division.
    fstk=0
    for i in xrange(n):
        fstk+=tick[i]
        rides=max(rides,(fstk-1)/(i+1)+1)
    # Count the excess over `rides` at each first-field value ("promotions").
    pro=0
    for i in xrange(n):
        if tick[i]>rides:
            pro+=tick[i]-rides
    fileToWrite.write("Case #"+str(case+1)+": "+str(rides)+" "+str(pro)+"\n")
fileToWrite.close()
print 'Total time: ', time.time() - time1
print 'Solving time: ', time.time() - time2
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8e5ce85a2fe571d465291f517c5e0f7843b78794 | 9602495fa11785bfd481d23abf789ce95b751fc2 | /uuv_smac_utils/scripts/create_results_folder | 2869d67a16a6e49988db26873b5acae0ef1b3a18 | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fredvaz/uuv_simulation_evaluation | 0f8d890ad4fccdfe87945ea9e603d81fb1d2f42b | 62c4c999b2ff995b346a06092f1bc57beea5af73 | refs/heads/master | 2020-03-26T08:53:33.214895 | 2018-08-14T13:38:49 | 2018-08-14T13:38:49 | 144,724,563 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,442 | #!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import rospy
import yaml
import shutil
from time import gmtime, strftime
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create results folder')
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.')
    parser.add_argument(
        '--input_dir',
        type=str,
        default='.')
    # Parse input arguments (rospy.myargv strips ROS remapping arguments).
    args = parser.parse_args(rospy.myargv()[1:])
    assert None not in [args.input_dir, args.output_dir], 'Invalid input or output folders'
    assert os.path.isdir(args.output_dir), 'Invalid output directory, dir=' + str(args.output_dir)
    assert os.path.isdir(args.input_dir), 'Invalid input directory, dir=' + str(args.input_dir)
    print 'Input folder = ' + args.input_dir
    print 'Output folder = ' + args.output_dir
    # Collect SMAC config files (.yml/.txt/.pcs) and smac3-output folders
    # sitting directly inside input_dir (no recursion).
    smac_folders = list()
    other_files = list()
    for item in os.listdir(args.input_dir):
        if ('.yml' in item or '.txt' in item or '.pcs' in item) and \
                os.path.isfile(os.path.join(args.input_dir, item)):
            other_files.append(os.path.join(args.input_dir, item))
            print 'File found = ' + os.path.join(args.input_dir, item)
        if 'smac3-output' in item and os.path.isdir(os.path.join(args.input_dir, item)):
            smac_folders.append(os.path.join(args.input_dir, item))
            print 'SMAC output folder found = ' + smac_folders[-1]
    assert len(smac_folders) > 0, 'No SMAC output folders found in directory ' + args.input_dir
    # Build a time-stamped folder prefix; ':' is replaced since it is not a
    # valid folder-name character on some filesystems.
    datestr = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
    results_folder_prefix = 'results_' + datestr
    results_folder_prefix = results_folder_prefix.replace(' ', '_')
    results_folder_prefix = results_folder_prefix.replace(':', '-')
    # Metadata template written next to each results folder; the <...>
    # placeholders are meant to be filled in by hand afterwards.
    info_file = dict(name='<add name here>',
                     date=strftime("%Y-%m-%d_%H:%M:%S", gmtime()),
                     iterations='<add max. number of iterations used>',
                     controller='<add name of the controller here>',
                     description='<add description here>')
    for i in range(len(smac_folders)):
        # One results folder per SMAC run, each holding a copy of the config
        # files plus the run's smac3-output tree and an info.yaml stub.
        results_folder = os.path.join(args.output_dir, results_folder_prefix + '_' + str(i), 'smac_config')
        assert not os.path.isdir(results_folder), 'Results folder already exists!'
        os.makedirs(results_folder)
        print 'Results folder created = ' + results_folder
        for item in other_files:
            shutil.copy(item, results_folder)
        shutil.copytree(smac_folders[i],
                        os.path.join(results_folder, os.path.basename(smac_folders[i])))
        with open(os.path.join(args.output_dir, results_folder_prefix + '_' + str(i), 'info.yaml'), 'w') as info:
            yaml.dump(info_file, info, default_flow_style=False)
| [
"fredvaz8@gmail.com"
] | fredvaz8@gmail.com | |
2dea5afe2da38332a9e1ae100dcd6b3750a2efc4 | c6374029bcba930ab37098e8e954067aeae252d8 | /mla/svm/kernerls.py | da289a183af1d605a4fb65c9a4a197ad9621ecda | [
"MIT"
] | permissive | bhumikadhingra/MLAlgorithms | ab6f20aa8a899ff265668155cb4083ec19535429 | 8f002d0804663854eaec41b4ead698caaaf11c69 | refs/heads/master | 2020-08-07T09:12:36.194453 | 2019-10-07T13:15:12 | 2019-10-07T13:15:12 | 213,387,107 | 1 | 0 | MIT | 2019-10-07T13:13:09 | 2019-10-07T13:13:09 | null | UTF-8 | Python | false | false | 721 | py | # coding:utf-8
import numpy as np
import scipy.spatial.distance as dist
class Linear(object):
    """Linear kernel: k(x, y) = <x, y> (Gram matrix for 2-D sample inputs)."""

    def __call__(self, x, y):
        # Plain inner product between x and the transposed samples of y.
        return np.dot(x, np.transpose(y))

    def __repr__(self):
        return "Linear kernel"
class Poly(object):
    """Polynomial kernel: k(x, y) = (<x, y>) ** degree."""

    def __init__(self, degree=2):
        self.degree = degree

    def __call__(self, x, y):
        inner = np.dot(x, np.transpose(y))
        return inner ** self.degree

    def __repr__(self):
        return "Poly kernel"
class RBF(object):
    """Gaussian (RBF) kernel: k(x, y) = exp(-gamma * ||x - y||^2)."""

    def __init__(self, gamma=0.1):
        self.gamma = gamma

    def __call__(self, x, y):
        # Promote vectors to 2-D sample matrices so cdist accepts them.
        xs = np.atleast_2d(x)
        ys = np.atleast_2d(y)
        sq_dists = dist.cdist(xs, ys) ** 2
        # Result is flattened to 1-D, matching the original contract.
        return np.exp(-self.gamma * sq_dists).flatten()

    def __repr__(self):
        return "RBF kernel"
| [
"me@rushter.com"
] | me@rushter.com |
e66ed88d5c9896e7e12a2b34aceded6f5f181e35 | d3f3a81a2b77db63381424e5fd8b27cf09c1dfdd | /ExpSettings/ParameterSettings.py | 9e790814de091557213e3a4315d13d52ddade216 | [
"Apache-2.0"
] | permissive | gokhangg/Uncertainix | e0d1f92575bfbf45b5ec0564d192e366595c3e28 | feb86dc9a8152bc133f99c56d8f15bf760754218 | refs/heads/master | 2021-06-23T07:23:22.132970 | 2020-12-25T20:55:24 | 2020-12-25T20:55:24 | 181,728,623 | 0 | 0 | Apache-2.0 | 2020-12-25T20:55:25 | 2019-04-16T16:39:07 | Python | UTF-8 | Python | false | false | 2,997 | py | # *=========================================================================
# *
# * Copyright Erasmus MC Rotterdam and contributors
# * This software is licensed under the Apache 2 license, quoted below.
# * Copyright 2019 Erasmus MC Rotterdam.
# * Copyright 2019 Gokhan Gunay <g.gunay@erasmsumc.nl>
# * Licensed under the Apache License, Version 2.0 (the "License"); you may not
# * use this file except in compliance with the License. You may obtain a copy of
# * the License at
# * http://www.apache.org/licenses/LICENSE-2.0
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# * License for the specific language governing permissions and limitations under
# * the License.
# *=========================================================================
class ParameterSettings(object):
    """Aggregates a list of parameter objects and exposes bulk accessors.

    Each element of ``parameters`` is duck-typed and expected to provide
    GetName(), GetValues(), GetStatistics(), GetType(),
    GetMethodParameters() and SetValues().
    """

    # NOTE(review): kept for backward compatibility; nothing in this class
    # reads it.
    __size = 1

    def __init__(self, parameters):
        self.__parameters = parameters
        # One value-mapping function per parameter; identity by default.
        self.__valueMappingFunctions = [lambda a: a for _ in self.__parameters]

    def GetParamSize(self):
        """Return the number of parameters."""
        return len(self.__parameters)

    def GetParamNames(self):
        """Return every parameter's name (fixed: return was missing)."""
        return [param.GetName() for param in self.__parameters]

    def GetParamValueSize(self):
        """Return how many values the first parameter holds.

        Raises IndexError when the parameter list is empty.
        """
        return len(self.__parameters[0].GetValues())

    def GetParamValues(self):
        """Return every parameter's value list (fixed: return was missing)."""
        return [param.GetValues() for param in self.__parameters]

    def GetParamStatistics(self):
        """Return every parameter's statistics (fixed: return was missing)."""
        return [param.GetStatistics() for param in self.__parameters]

    def GetParamTypes(self):
        """Return every parameter's type (fixed: return was missing)."""
        return [param.GetType() for param in self.__parameters]

    def GetParamMethodParameters(self):
        """Return every parameter's method parameters."""
        return [param.GetMethodParameters() for param in self.__parameters]

    def SetParamValues(self, paramIndex, values):
        """Map, stringify and assign ``values`` to parameter ``paramIndex``."""
        assert paramIndex < len(self.__parameters), "Parameter number mismatch."
        values = self.__MapParamVals(paramIndex, values)
        values = self.__ListToStr(values)
        self.__parameters[paramIndex].SetValues(values)

    def SaveParamValuesToFIle(self, file):
        # Not implemented yet (method-name typo kept for API compatibility).
        pass

    def ReadParamValsFromFile(self, file):
        # Not implemented yet.
        pass

    def __MapParamVals(self, paramIndex, values):
        """Apply the parameter's mapping function to every value.

        Fixed: this was a @classmethod invoked with an extra ``self``
        argument (a TypeError at runtime) that read
        ``__valueMappingFunctions`` from the class, where it never exists,
        and indexed the function list by value position. One mapping function
        is stored per parameter, so it is selected by ``paramIndex``.
        """
        mapper = self.__valueMappingFunctions[paramIndex]
        return [mapper(value) for value in values]

    @staticmethod
    def __ListToStr(ls):
        """Return a list with every element converted to str."""
        return [str(par) for par in ls]
"gokhan.gunay@twtg.io"
] | gokhan.gunay@twtg.io |
447ac3f1ac0d106fba52193d97ee08ee90a288e7 | 0d329ade9fcb1edcb9825d2450b57ad3b467e240 | /plot_batch_1.py | 39e456bc0d6584b7db9ea047f848a5d2b2d2e8da | [
"BSD-2-Clause"
] | permissive | ROBOTICSENGINEER/Automatic-Open-World-Reliability-Assessment | 39539e5ad6456b83780694915525149fbadcaa74 | 459d002e482791e1dc47be681d5c073b6e8fce6d | refs/heads/main | 2023-01-29T21:08:40.788927 | 2020-12-10T23:01:28 | 2020-12-10T23:01:28 | 311,183,904 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,253 | py | import numpy as np
import matplotlib.pyplot as plt
# Load the precomputed evaluation arrays for the plots below.
# NOTE(review): the indexing below implies a 3-D layout for the metric
# arrays -- apparently (unknown-percentage setting, threshold sweep,
# algorithm), with 7 algorithms on the last axis; confirm against the script
# that produced ond_array_plot_4.npz.
data = np.load('ond_array_plot_4.npz')
threshold = data['threshold']
total_accuracy = data['total_accuracy']
failure = data['failure']
early = data['early']
on_time = data['on_time']
late = data['late']
absolute_error = data['absolute_error']
del data
detected = 1 - failure
# X axis of every plot: percentage of unknowns, 2..25 inclusive.
x = np.arange(2,26)
# For each algorithm, pick the threshold index that maximizes total accuracy
# in the first unknown-percentage setting.
ind = [0] * 7
for k in range(7):
    ind[k] = np.argmax(total_accuracy[0,:,k])
# Mask entries whose failure rate is ~1 as NaN so matplotlib drops them.
# (detected was derived from failure above, *before* this masking.)
absolute_error[failure > 0.99] = np.nan
early[failure > 0.99] = np.nan
on_time[failure > 0.99] = np.nan
late[failure > 0.99] = np.nan
total_accuracy[failure > 0.99] = np.nan
detected[failure > 0.99] = np.nan
# Per-metric curves evaluated at each algorithm's chosen threshold index.
peak_total_accuracy = np.zeros((len(x),failure.shape[2]))
peak_early = np.zeros((len(x),failure.shape[2]))
peak_on_time = np.zeros((len(x),failure.shape[2]))
peak_late = np.zeros((len(x),failure.shape[2]))
peak_absolute_error = np.zeros((len(x),failure.shape[2]))
peak_detected = np.zeros((len(x),failure.shape[2]))
for k,p in enumerate(x):
    for j in range(7):
        peak_total_accuracy[k,j] = total_accuracy[k,ind[j],j]
        peak_early[k,j] = early[k,ind[j],j]
        peak_on_time[k,j] = on_time[k,ind[j],j]
        peak_late[k,j] = late[k,ind[j],j]
        peak_absolute_error[k,j] = absolute_error[k,ind[j],j]
        peak_detected[k,j] = detected[k,ind[j],j]
# The six figures below were six near-identical 15-line blocks; they are
# deduplicated into one helper. Output (figure contents, labels, colors and
# the order figures are shown in) is unchanged.

# (column in the peak_* arrays, line color, legend label) for each plotted
# algorithm. Columns 4 ('Bi KL independent') and 6 ('OLD OND') were disabled
# (commented out) in the original plots and stay disabled here.
_SERIES = [
    (0, '#800000', 'mean SoftMax'),
    (1, '#ff0000', 'KL SoftMax'),
    (2, '#0000ff', 'OND EVM'),
    (3, '#00ffff', 'KL EVM'),
    (5, '#008000', 'Bi KL full'),
]


def _plot_metric(values, title, ylabel):
    """Show one figure plotting the selected columns of `values` against x.

    `values` is one of the peak_* arrays computed above, with one column per
    algorithm.
    """
    fig, ax = plt.subplots(figsize=(6, 4), dpi=300)
    for column, color, label in _SERIES:
        ax.plot(x, values[:, column], color=color, linewidth=4, label=label)
    ax.set_xlim([2, 25])
    plt.xlabel('Percentage of unknown')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend(bbox_to_anchor=(1.05, 1), ncol=1)
    plt.show()


_plot_metric(peak_total_accuracy, 'Total Accuracy', 'Accuracy')
_plot_metric(peak_early, 'Ratio early', 'Ratio')
_plot_metric(peak_on_time, 'Ratio on-time', 'Ratio')
_plot_metric(peak_late, 'Ratio late', 'Ratio')
_plot_metric(peak_absolute_error, 'Mean Absolute Error', 'Error')
_plot_metric(peak_detected, 'Ratio Detected', 'Ratio')
| [
"noreply@github.com"
] | ROBOTICSENGINEER.noreply@github.com |
e6b15aacdac8f257c3240c256182cbdfa5762876 | 684ef6a4a71c4f2a9d1dd475eba53a0db4555199 | /semantic-parserV1.py | 48ce7a341cf0bf702752186741f551f08975ea9b | [] | no_license | MananVyas24/NLP-SemanticParser | 89a3be0193503fd9f0ef08efb668c568189f8be4 | 610cdb9b27b6b32dc30c7c5f18b97699f90b46aa | refs/heads/master | 2022-12-11T11:04:40.346199 | 2019-03-05T03:11:01 | 2019-03-05T03:11:01 | 173,862,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | #!/usr/bin/env python
import itertools, sys
from nltk.parse import RecursiveDescentParser, ShiftReduceParser, ViterbiParser
from nltk import Nonterminal, nonterminals, Production, CFG, PCFG
# Demo transcript: anti-submarine-warfare-style radio utterances (DIFAR
# buoys, bearings, sonobuoys) that the grammar below is designed to parse.
# Numeric tokens are replaced with '*VAR-NUM*' by parseNums() before parsing.
TEST_SENTENCES = [
    "acoustic contact on DIFAR 95 , bearing 090 , frequency 1.234 .",
    "additional buoys of interest are 95 and 94 .",
    "updated bearing of 095 out of DIFAR 95 .",
    "roger .",
    "standby for localization pattern .",
    "turning inbound , drop heading is 120 .",
    "standby for buoy drops .",
    "sonobuoy power .",
    "sonobuoy arm , buoy available .",
    "DIFAR 94 away .",
    "good signal on all buoys .",
    "currently in the localization phase and monitoring all sensors for subsurface contact .",
    "all buoys tuned and processing .",
    "we currently have contact on DIFAR 94 bearing 072 and on DIFAR 93 bearing 290 .",
    "contact is assessed as a Chinese nuclear submarine based on frequency of 1.234 hertz .",
    "contact is now coming in on DIFAR 94 bearing 072 and on DIFAR 93 bearing 290 .",
    "roger .",
    "new fix position established .",
    "gentrack with initial course estimate of 225 and speed 5 knots .",
    "contact is classified as Chinese nuclear submarine based on detected frequencies of 123.4 hertz .",
    "send contact report using the gentrack course and speed .",
    "wilco .",
    "contact report has been sent to the TOC QSL 0945 Zulu .",
    "we are transitioning from the localization to tracking .",
    "I estimate the TOI course and speed to be 225 at 8 knots ."
]

# Hand-written CFG over the utterances above; domain-specific categories are
# prefixed VAR_.
# NOTE(review): VAR_LOCATION is referenced in an N production but has no
# production of its own, and the lone "ATION -> ..." rule looks like a
# truncated "VAR_LOCATION ->" -- confirm against the original source.
grammar = CFG.fromstring("""
S -> CP '.'
S -> CP CONJ S
S -> NP
CP -> VAR_COMPLIANCE
CP -> NP ADJ
CP -> VP
CP -> VP PP
CP -> VP VP
CP -> NP PP
NP -> DET NP
NP -> ADJ NP
NP -> N
NP -> N NP
NP -> N PP
NP -> N CONJ NP
PP -> P NP
PP -> P PP
PP -> P NP PP
PP -> P NP CONJ PP
VP -> V
VP -> NP V
VP -> V NP
VP -> V NP PP
VP -> V NP NP
VP -> NP V NP
VP -> NP V NP NP
VP -> ADVERB VP
V -> AUX
V -> AUX V
V -> VAR_INSTRUCTION | VAR_ACTIVITY
V -> 'established' | 'classified' | 'away' | 'tuned' | 'processing'
V -> 'estimate' | 'updated' | 'arm' | 'power' | 'updated' | 'using'
V -> 'sent' | 'coming' | 'assessed' | 'detected'
V -> 'to' 'be'
ADVERB -> 'now' | 'currently'
AUX -> 'is' | 'was' | 'are' | 'were' | 'has' | 'be' | 'have'
AUX -> 'has' 'been'
ADJ -> VAR_STATUS
ADJ -> 'new' | 'additional' | 'available'
P -> 'for' | 'in' | 'of' | 'on' | 'from' | 'to' | 'with' | 'at' | 'as'
P -> 'out' PP
P -> 'based' PP
N -> VAR_INFO | VAR_DATA | VAR_PHASE | VAR_EVENT | VAR_ITEM | '*VAR-NUM*' | VAR_DIRECTION | VAR_PRONOUN | VAR_LOCATION
N -> 'signal' | 'interest' | 'speed' | 'TOI' | 'course' | 'frequency' | 'frequencies'
N -> VAR_ITEM VAR_INFO
N -> 'TOI' VAR_INFO
DET -> 'the' | 'a' | 'an' | 'this' | 'all'
CONJ -> 'and' | 'or' | ',' | ';'
VAR_COMPLIANCE -> 'roger' | 'wilco'
VAR_STATUS -> 'good' | 'bad' | 'great' | 'horrible' | 'terrible'
VAR_INSTRUCTION -> 'standby' | 'send'
VAR_PRONOUN -> 'I' | 'you' | 'he' | 'she' | 'we' | 'they' | 'contact' | 'who'
ATION -> 'TOC' 'QSL' '*VAR-NUM*' 'Zulu'
VAR_ACTIVITY -> 'monitoring' | 'studying' | 'turning' | 'transitioning'
VAR_INFO -> 'course' 'and' 'speed' | 'drop' 'heading' | 'fix' 'position' | 'initial' 'course' 'estimate' | 'bearing'
VAR_PHASE -> 'localization' | 'localization' 'phase' | 'localization' 'pattern' | 'tracking' | 'tracking' 'phase' | 'tracking' 'pattern'
VAR_DIRECTION -> 'inbound'
VAR_EVENT -> 'buoy' 'drops' | 'subsurface' 'contact' | 'acoustic' 'contact'
VAR_ITEM -> 'all' 'buoys' | 'buoy' | 'buoys' | 'sonobuoy' | 'sonobuoys' | 'Chinese' 'nuclear' 'submarine' | 'contact' 'report' | 'sensors' | 'gentrack' | 'all' 'sensors'
VAR_DATA -> DATATYPE '*VAR-NUM*'
VAR_DATA -> '*VAR-NUM*' DATATYPE
DATATYPE -> 'DIFAR' | 'bearing' | 'frequency' | 'frequencies' | 'knots' | 'hertz'
""")
##############################################################################
def isFloatNum(x):
    """Return True if *x* parses as a float, False otherwise.

    Fix: float() raises TypeError (not ValueError) for non-string,
    non-numeric inputs such as None; catch both so the predicate never
    leaks an exception.
    """
    try:
        float(x)
        return True
    except (ValueError, TypeError):
        return False
def parseNums(tokens):
    """Mask numeric tokens: any token that is all digits or parses as a
    float is replaced by the placeholder '*VAR-NUM*'; others pass through."""
    masked = []
    for token in tokens:
        if token.isdigit() or isFloatNum(token):
            masked.append("*VAR-NUM*")
        else:
            masked.append(token)
    return masked
def main(sentences):
    # NOTE: Python 2 code (bare `print` statements throughout).
    # Parse each sentence with a recursive-descent parser over the CFG
    # defined above, printing up to two parse trees per sentence.
    parser = RecursiveDescentParser(grammar)
    """
    NOTE: While this demo uses a CFG, in practice we would develop a probabilistic
    context free grammar (PCFG) and then use the Viterbi parser below for efficient
    parsing.
    """
    #parser = ViterbiParser(grammar)
    # Indices of sentences known to be too slow for backtracking parsing.
    sentences_to_skip = [12, 13]
    print "Num sentences: " + repr(len(sentences))
    for i, sent in enumerate(sentences):
        # Tokenize on single spaces and mask numbers as '*VAR-NUM*'.
        tkns = parseNums(sent.split(" "))
        try:
            print "-"*60
            print "Parsing Sentence #%d/%d."%(i+1, len(sentences))
            print "Sentence: \"%s\""%(sent)
            print "Tokenization: " + repr(tkns)
            if i in sentences_to_skip:
                print "Skipping this sentence as it takes a bit too long to parse without PCFG."
                continue
            print ""
            # Just take the first two parses produced for the demo.
            trees = list(itertools.islice(parser.parse(tkns), 2))
            for j, t in enumerate(trees):
                print "Parse #%d/%d:"%(j+1, len(trees))
                print t
            # A sentence with zero parses is a grammar-coverage failure.
            assert len(trees) > 0
        except Exception as e:
            print "ERROR: Failed to parse sentence."
            print e
        finally:
            print ""
            sys.stdout.flush()
if __name__=='__main__':
    # Alternative: sort sentences by token count so short (fast) ones run first.
    #ordered_sents = [s for (_, s) in sorted([(len(s.split()), s) for s in TEST_SENTENCES])]
    #main(ordered_sents)
    main(TEST_SENTENCES)
"noreply@github.com"
] | MananVyas24.noreply@github.com |
322f1c3cf85b4af0a0a8a8e4c4ae3a3c539f7dd7 | 71475e1214ac703b095b7a87ecaf89ad17b0ffd9 | /img/settings.py | 66946b6f7dac97ca96f684bb0471359a3cc53298 | [] | no_license | zeng342541425/mzitu | cc3443e8795ba2437a59c51683882b4f16bf630e | 3bc67f8640b4427f140ba25598ddd73c68205c1b | refs/heads/master | 2021-09-01T02:07:26.799526 | 2017-12-24T10:08:20 | 2017-12-24T10:08:20 | 115,096,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,348 | py | # -*- coding: utf-8 -*-
# Scrapy settings for img project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Project identity and spider discovery paths for the 'img' Scrapy project.
BOT_NAME = 'img'
SPIDER_MODULES = ['img.spiders']
NEWSPIDER_MODULE = 'img.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'img (+http://www.mzitu.com)'
#'USER_AGENT'='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
# Obey robots.txt rules
# NOTE(review): robots.txt is deliberately ignored for this crawl.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# NOTE(review): cookies are disabled so each request looks like a fresh session.
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
# 'Accept-Encoding':'gzip, deflate, br',
# 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'img.middlewares.ImgSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'img.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'img.pipelines.ImgPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"342541425@qq.com"
] | 342541425@qq.com |
387523e464797ebfe8e34406b339dc22c29b74c0 | 69099b95bb1507b30e6be8d4ad1d39f923833e97 | /BIRL_optimal_demos.py | 737cf7525eced539ad6a481bc67d6068cbee618d | [
"MIT"
] | permissive | dsbrown1331/aima-based-irl | 80d8cc2eafd751bd84bdcda6ad5a9a44060947c6 | 553550030240ae886f4260ece59dd252adb1fc6e | refs/heads/master | 2021-01-17T23:12:03.038606 | 2016-09-27T15:30:27 | 2016-09-27T15:30:27 | 67,889,747 | 1 | 0 | null | 2016-09-10T19:16:56 | 2016-09-10T19:16:55 | null | UTF-8 | Python | false | false | 4,300 | py | import numpy as np
import mdp
from my_birl_batch import *
from my_birl import *
from halfspace_uncertainty import *
from mdp_feature_counts import *
from optimal_teaching import *
from activeLearning import chain_variance
import operator
for size in range(3,4):
print "^^^^^^", size, "^^^^^^^"
f = open('active_results/optimalTest' + str(size)+ '2.txt','w')
for iter in range(10):
print "-----", iter, "------"
#generate a random n by n world
grid_width = size
grid_height = size
rand_reward = []
for row in range(grid_height):
temp = []
for col in range(grid_width):
temp.append(np.random.randint(-10,0))
rand_reward.append(temp)
rand_reward[0][0] = 10
###for debugging
#rand_reward = [[10.00, -5.00, -5.00],
#[-1.00, -1.00, -1.00 ]]
###
terminals=[(0,grid_height-1)]
init = []
for row in range(grid_height):
for col in range(grid_width):
if row == grid_height-1 and col == 0:
continue
init.append((col,row))
print "init"
print init
expert_mdp = mdp.GridMDP(deepcopy(rand_reward), terminals, init)
expert_mdp.print_rewards()
expert_mdp.print_arrows()
#try Cakmak's Task 1 with just one start to see if it gets the same demo
#birlToy = DeterministicWeightGridMDP(
# features = ['f0', 'f1', 'f2'],
# weights = {'f0': 10, 'f1': -5, 'f2': -1, None: None},
# grid = [['f0', 'f1', 'f1'],
# ['f2', 'f2', 'f2']],
# terminals=[(0,1)],
# init = [(0,0),(1,0),(1,1),(2,0),(2,1)], gamma = 0.9)
features = []
count = 0
for row in range(grid_height):
for col in range(grid_width):
features.append('f' + str(count))
count += 1
#print "features"
#print features
weights = {}
count = 0
for row in range(grid_height):
for col in range(grid_width):
#print row,col
weights[features[count]] = rand_reward[row][col]
count += 1
weights[None] = None
print "weights"
print weights
grid = []
count = 0
for row in range(grid_height):
temp = []
for col in range(grid_width):
temp.append(features[count])
count += 1
grid.append(temp)
#print "grid"
#print grid
#select random init state
demo_init = init[np.random.randint(0,len(init))]
print "demo_init"
print demo_init
#generate random demo
demo = []
expert_policy = best_policy(expert_mdp, value_iteration(expert_mdp, 0.001))
demo.append(mdp.generate_demonstration(demo_init, expert_policy, expert_mdp))
print "demo"
print demo
rand_task = DeterministicWeightGridMDP(
features, weights, grid, terminals, init, gamma = 0.95)
#rand_task.print_rewards()
#rand_task.print_arrows()
cakmak_optimal = seeded_optimal_teaching(demo,rand_task, 100000,10)
#print("solution: ", cakmak_optimal)
score, cakmak_demo = cakmak_optimal
cakmak_init = cakmak_demo[0][0]
print "cakmak", cakmak_init
#compare to BIRL active learning reward variance approach
chain_length = 12000
chain_burn = 2000
birl = BIRL_BATCH(demo, expert_mdp.get_grid_size(), expert_mdp.terminals, expert_mdp.init,
step_size=1.0, birl_iteration = chain_length)
chain, mapMDP = birl.run_birl()
chain_var = chain_variance(chain, chain_burn)
#find highest variance that's not start of demo or terminal state
chain_var.pop(terminals[0])
sorted_var = sorted(chain_var.items(), key=operator.itemgetter(1))
sorted_var.reverse()
query_states = [state for state, var in sorted_var]
print query_states
indx = query_states.index(cakmak_init)
print indx
f.write(str(indx) + '\n') # python will convert \n to os.linesep
f.close()
| [
"dsbrown1331@gmail.com"
] | dsbrown1331@gmail.com |
e35261b22200adf9a747944433535b3113ca9f20 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cvtmodel/regnet/src/regnet_y_32gf.py | 83f6c9481a8ff8e6c10c4b8672cd4e25a52daf55 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 39,743 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.ops as P
from mindspore import nn
class Module0(nn.Cell):
    """Squeeze-and-excitation (SE) bottleneck branch.

    Pipeline: 1x1 conv -> ReLU -> grouped 3x3 conv (optionally strided)
    -> ReLU -> average pool -> 1x1 reduce conv -> ReLU -> 1x1 expand conv
    -> sigmoid gate, multiplied element-wise onto the 3x3 features.

    Callers pass an avgpool kernel equal to the stage's feature-map size
    (56/28/14/7), making the pool a global average — the SE "squeeze".

    NOTE(review): attribute names (conv2d_0, ...) appear to be generated by a
    model converter and likely map to checkpoint parameter names — do not rename.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
                 conv2d_2_stride, conv2d_2_group, avgpool2d_4_kernel_size, conv2d_5_in_channels, conv2d_5_out_channels,
                 conv2d_7_in_channels, conv2d_7_out_channels):
        super(Module0, self).__init__()
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()
        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
                                  out_channels=conv2d_2_out_channels,
                                  kernel_size=(3, 3),
                                  stride=conv2d_2_stride,
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=conv2d_2_group,
                                  has_bias=True)
        self.relu_3 = nn.ReLU()
        self.avgpool2d_4 = nn.AvgPool2d(kernel_size=avgpool2d_4_kernel_size)
        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
                                  out_channels=conv2d_5_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_6 = nn.ReLU()
        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
                                  out_channels=conv2d_7_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.sigmoid_8 = nn.Sigmoid()

    def construct(self, x):
        """Run the SE bottleneck branch and return the gated 3x3 features."""
        opt_conv2d_0 = self.conv2d_0(x)
        opt_relu_1 = self.relu_1(opt_conv2d_0)
        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
        opt_relu_3 = self.relu_3(opt_conv2d_2)
        opt_avgpool2d_4 = self.avgpool2d_4(opt_relu_3)
        opt_conv2d_5 = self.conv2d_5(opt_avgpool2d_4)
        opt_relu_6 = self.relu_6(opt_conv2d_5)
        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
        opt_sigmoid_8 = self.sigmoid_8(opt_conv2d_7)
        # Channel-wise gate: scale the 3x3 features by the sigmoid excitation.
        opt_mul_9 = P.Mul()(opt_sigmoid_8, opt_relu_3)
        return opt_mul_9
class Module1(nn.Cell):
    """Projection branch: SE bottleneck (Module0) followed by a 1x1 conv.

    No residual add or activation here — in MindSporeModel the returned
    tensor is added to a parallel strided 1x1 shortcut conv by the caller.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module0_0_conv2d_0_in_channels,
                 module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels, module0_0_conv2d_2_out_channels,
                 module0_0_conv2d_2_stride, module0_0_conv2d_2_group, module0_0_avgpool2d_4_kernel_size,
                 module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels, module0_0_conv2d_7_in_channels,
                 module0_0_conv2d_7_out_channels):
        super(Module1, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_0_conv2d_2_stride,
                                 conv2d_2_group=module0_0_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_0_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)

    def construct(self, x):
        """Return the 1x1 projection of the SE-bottleneck branch output."""
        module0_0_opt = self.module0_0(x)
        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
        return opt_conv2d_0
class Module2(nn.Cell):
    """Residual SE-bottleneck unit: Module0 branch -> 1x1 conv -> add input -> ReLU.

    Input and output channel counts must match (identity shortcut).
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module0_0_conv2d_0_in_channels,
                 module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels, module0_0_conv2d_2_out_channels,
                 module0_0_conv2d_2_stride, module0_0_conv2d_2_group, module0_0_avgpool2d_4_kernel_size,
                 module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels, module0_0_conv2d_7_in_channels,
                 module0_0_conv2d_7_out_channels):
        super(Module2, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_0_conv2d_2_stride,
                                 conv2d_2_group=module0_0_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_0_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_2 = nn.ReLU()

    def construct(self, x):
        """Apply the SE-bottleneck residual unit to x."""
        module0_0_opt = self.module0_0(x)
        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
        # Identity shortcut: add the block input to the branch output.
        opt_add_1 = P.Add()(x, opt_conv2d_0)
        opt_relu_2 = self.relu_2(opt_add_1)
        return opt_relu_2
class Module4(nn.Cell):
    """Chain of four residual SE-bottleneck units.

    Each unit is: Module0 branch -> 1x1 conv -> add previous output -> ReLU,
    i.e. four Module2-equivalent units fused into one cell by the converter.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_3_in_channels, conv2d_3_out_channels,
                 conv2d_6_in_channels, conv2d_6_out_channels, conv2d_9_in_channels, conv2d_9_out_channels,
                 module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_2_group,
                 module0_0_avgpool2d_4_kernel_size, module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels,
                 module0_0_conv2d_7_in_channels, module0_0_conv2d_7_out_channels, module0_1_conv2d_0_in_channels,
                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
                 module0_1_conv2d_2_stride, module0_1_conv2d_2_group, module0_1_avgpool2d_4_kernel_size,
                 module0_1_conv2d_5_in_channels, module0_1_conv2d_5_out_channels, module0_1_conv2d_7_in_channels,
                 module0_1_conv2d_7_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
                 module0_2_conv2d_2_group, module0_2_avgpool2d_4_kernel_size, module0_2_conv2d_5_in_channels,
                 module0_2_conv2d_5_out_channels, module0_2_conv2d_7_in_channels, module0_2_conv2d_7_out_channels,
                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
                 module0_3_conv2d_2_out_channels, module0_3_conv2d_2_stride, module0_3_conv2d_2_group,
                 module0_3_avgpool2d_4_kernel_size, module0_3_conv2d_5_in_channels, module0_3_conv2d_5_out_channels,
                 module0_3_conv2d_7_in_channels, module0_3_conv2d_7_out_channels):
        super(Module4, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_0_conv2d_2_stride,
                                 conv2d_2_group=module0_0_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_0_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_2 = nn.ReLU()
        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_1_conv2d_2_stride,
                                 conv2d_2_group=module0_1_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_1_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_1_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_1_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels)
        self.conv2d_3 = nn.Conv2d(in_channels=conv2d_3_in_channels,
                                  out_channels=conv2d_3_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_5 = nn.ReLU()
        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_2_conv2d_2_stride,
                                 conv2d_2_group=module0_2_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_2_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_2_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_2_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_2_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_2_conv2d_7_out_channels)
        self.conv2d_6 = nn.Conv2d(in_channels=conv2d_6_in_channels,
                                  out_channels=conv2d_6_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_8 = nn.ReLU()
        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
                                 conv2d_2_stride=module0_3_conv2d_2_stride,
                                 conv2d_2_group=module0_3_conv2d_2_group,
                                 avgpool2d_4_kernel_size=module0_3_avgpool2d_4_kernel_size,
                                 conv2d_5_in_channels=module0_3_conv2d_5_in_channels,
                                 conv2d_5_out_channels=module0_3_conv2d_5_out_channels,
                                 conv2d_7_in_channels=module0_3_conv2d_7_in_channels,
                                 conv2d_7_out_channels=module0_3_conv2d_7_out_channels)
        self.conv2d_9 = nn.Conv2d(in_channels=conv2d_9_in_channels,
                                  out_channels=conv2d_9_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_11 = nn.ReLU()

    def construct(self, x):
        """Apply four residual SE-bottleneck units in sequence."""
        module0_0_opt = self.module0_0(x)
        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
        opt_add_1 = P.Add()(x, opt_conv2d_0)
        opt_relu_2 = self.relu_2(opt_add_1)
        module0_1_opt = self.module0_1(opt_relu_2)
        opt_conv2d_3 = self.conv2d_3(module0_1_opt)
        opt_add_4 = P.Add()(opt_relu_2, opt_conv2d_3)
        opt_relu_5 = self.relu_5(opt_add_4)
        module0_2_opt = self.module0_2(opt_relu_5)
        opt_conv2d_6 = self.conv2d_6(module0_2_opt)
        opt_add_7 = P.Add()(opt_relu_5, opt_conv2d_6)
        opt_relu_8 = self.relu_8(opt_add_7)
        module0_3_opt = self.module0_3(opt_relu_8)
        opt_conv2d_9 = self.conv2d_9(module0_3_opt)
        opt_add_10 = P.Add()(opt_relu_8, opt_conv2d_9)
        opt_relu_11 = self.relu_11(opt_add_10)
        return opt_relu_11
class Module3(nn.Cell):
    """Two residual SE-bottleneck units with a fixed 1392-channel configuration.

    Hard-coded specialization of the Module2 pattern (group=6, 14x14 global
    pool, 348-channel SE reduction) used in the third stage of MindSporeModel.
    """

    def __init__(self):
        super(Module3, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 conv2d_2_in_channels=1392,
                                 conv2d_2_out_channels=1392,
                                 conv2d_2_stride=(1, 1),
                                 conv2d_2_group=6,
                                 avgpool2d_4_kernel_size=(14, 14),
                                 conv2d_5_in_channels=1392,
                                 conv2d_5_out_channels=348,
                                 conv2d_7_in_channels=348,
                                 conv2d_7_out_channels=1392)
        self.conv2d_0 = nn.Conv2d(in_channels=1392,
                                  out_channels=1392,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_2 = nn.ReLU()
        self.module0_1 = Module0(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 conv2d_2_in_channels=1392,
                                 conv2d_2_out_channels=1392,
                                 conv2d_2_stride=(1, 1),
                                 conv2d_2_group=6,
                                 avgpool2d_4_kernel_size=(14, 14),
                                 conv2d_5_in_channels=1392,
                                 conv2d_5_out_channels=348,
                                 conv2d_7_in_channels=348,
                                 conv2d_7_out_channels=1392)
        self.conv2d_3 = nn.Conv2d(in_channels=1392,
                                  out_channels=1392,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_5 = nn.ReLU()

    def construct(self, x):
        """Apply two residual SE-bottleneck units in sequence."""
        module0_0_opt = self.module0_0(x)
        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
        opt_add_1 = P.Add()(x, opt_conv2d_0)
        opt_relu_2 = self.relu_2(opt_add_1)
        module0_1_opt = self.module0_1(opt_relu_2)
        opt_conv2d_3 = self.conv2d_3(module0_1_opt)
        opt_add_4 = P.Add()(opt_relu_2, opt_conv2d_3)
        opt_relu_5 = self.relu_5(opt_add_4)
        return opt_relu_5
class MindSporeModel(nn.Cell):
    """RegNet-style image classifier (file name indicates RegNetY-32GF),
    machine-converted to MindSpore.

    Architecture as visible here: 3x3/2 stem conv -> four stages of
    SE-residual bottleneck blocks widening 232 -> 696 -> 1392 -> 3712
    channels (each stage entered via a strided 1x1 shortcut conv added to a
    strided Module1 branch) -> 7x7 average pool -> flatten -> 1000-way
    dense head. Assumes 224x224 input so the pool kernels match the
    feature-map sizes — TODO confirm.
    """

    def __init__(self):
        super(MindSporeModel, self).__init__()
        # Stem: 3x3 stride-2 conv on RGB input.
        self.conv2d_0 = nn.Conv2d(in_channels=3,
                                  out_channels=32,
                                  kernel_size=(3, 3),
                                  stride=(2, 2),
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()
        # Stage 1 shortcut: strided 1x1 projection to 232 channels.
        self.conv2d_2 = nn.Conv2d(in_channels=32,
                                  out_channels=232,
                                  kernel_size=(1, 1),
                                  stride=(2, 2),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.module1_0 = Module1(conv2d_0_in_channels=232,
                                 conv2d_0_out_channels=232,
                                 module0_0_conv2d_0_in_channels=32,
                                 module0_0_conv2d_0_out_channels=232,
                                 module0_0_conv2d_2_in_channels=232,
                                 module0_0_conv2d_2_out_channels=232,
                                 module0_0_conv2d_2_stride=(2, 2),
                                 module0_0_conv2d_2_group=1,
                                 module0_0_avgpool2d_4_kernel_size=(56, 56),
                                 module0_0_conv2d_5_in_channels=232,
                                 module0_0_conv2d_5_out_channels=8,
                                 module0_0_conv2d_7_in_channels=8,
                                 module0_0_conv2d_7_out_channels=232)
        self.relu_15 = nn.ReLU()
        self.module2_0 = Module2(conv2d_0_in_channels=232,
                                 conv2d_0_out_channels=232,
                                 module0_0_conv2d_0_in_channels=232,
                                 module0_0_conv2d_0_out_channels=232,
                                 module0_0_conv2d_2_in_channels=232,
                                 module0_0_conv2d_2_out_channels=232,
                                 module0_0_conv2d_2_stride=(1, 1),
                                 module0_0_conv2d_2_group=1,
                                 module0_0_avgpool2d_4_kernel_size=(56, 56),
                                 module0_0_conv2d_5_in_channels=232,
                                 module0_0_conv2d_5_out_channels=58,
                                 module0_0_conv2d_7_in_channels=58,
                                 module0_0_conv2d_7_out_channels=232)
        # Stage 2 shortcut: strided 1x1 projection to 696 channels.
        self.conv2d_29 = nn.Conv2d(in_channels=232,
                                   out_channels=696,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module1_1 = Module1(conv2d_0_in_channels=696,
                                 conv2d_0_out_channels=696,
                                 module0_0_conv2d_0_in_channels=232,
                                 module0_0_conv2d_0_out_channels=696,
                                 module0_0_conv2d_2_in_channels=696,
                                 module0_0_conv2d_2_out_channels=696,
                                 module0_0_conv2d_2_stride=(2, 2),
                                 module0_0_conv2d_2_group=3,
                                 module0_0_avgpool2d_4_kernel_size=(28, 28),
                                 module0_0_conv2d_5_in_channels=696,
                                 module0_0_conv2d_5_out_channels=58,
                                 module0_0_conv2d_7_in_channels=58,
                                 module0_0_conv2d_7_out_channels=696)
        self.relu_42 = nn.ReLU()
        self.module4_0 = Module4(conv2d_0_in_channels=696,
                                 conv2d_0_out_channels=696,
                                 conv2d_3_in_channels=696,
                                 conv2d_3_out_channels=696,
                                 conv2d_6_in_channels=696,
                                 conv2d_6_out_channels=696,
                                 conv2d_9_in_channels=696,
                                 conv2d_9_out_channels=696,
                                 module0_0_conv2d_0_in_channels=696,
                                 module0_0_conv2d_0_out_channels=696,
                                 module0_0_conv2d_2_in_channels=696,
                                 module0_0_conv2d_2_out_channels=696,
                                 module0_0_conv2d_2_stride=(1, 1),
                                 module0_0_conv2d_2_group=3,
                                 module0_0_avgpool2d_4_kernel_size=(28, 28),
                                 module0_0_conv2d_5_in_channels=696,
                                 module0_0_conv2d_5_out_channels=174,
                                 module0_0_conv2d_7_in_channels=174,
                                 module0_0_conv2d_7_out_channels=696,
                                 module0_1_conv2d_0_in_channels=696,
                                 module0_1_conv2d_0_out_channels=696,
                                 module0_1_conv2d_2_in_channels=696,
                                 module0_1_conv2d_2_out_channels=696,
                                 module0_1_conv2d_2_stride=(1, 1),
                                 module0_1_conv2d_2_group=3,
                                 module0_1_avgpool2d_4_kernel_size=(28, 28),
                                 module0_1_conv2d_5_in_channels=696,
                                 module0_1_conv2d_5_out_channels=174,
                                 module0_1_conv2d_7_in_channels=174,
                                 module0_1_conv2d_7_out_channels=696,
                                 module0_2_conv2d_0_in_channels=696,
                                 module0_2_conv2d_0_out_channels=696,
                                 module0_2_conv2d_2_in_channels=696,
                                 module0_2_conv2d_2_out_channels=696,
                                 module0_2_conv2d_2_stride=(1, 1),
                                 module0_2_conv2d_2_group=3,
                                 module0_2_avgpool2d_4_kernel_size=(28, 28),
                                 module0_2_conv2d_5_in_channels=696,
                                 module0_2_conv2d_5_out_channels=174,
                                 module0_2_conv2d_7_in_channels=174,
                                 module0_2_conv2d_7_out_channels=696,
                                 module0_3_conv2d_0_in_channels=696,
                                 module0_3_conv2d_0_out_channels=696,
                                 module0_3_conv2d_2_in_channels=696,
                                 module0_3_conv2d_2_out_channels=696,
                                 module0_3_conv2d_2_stride=(1, 1),
                                 module0_3_conv2d_2_group=3,
                                 module0_3_avgpool2d_4_kernel_size=(28, 28),
                                 module0_3_conv2d_5_in_channels=696,
                                 module0_3_conv2d_5_out_channels=174,
                                 module0_3_conv2d_7_in_channels=174,
                                 module0_3_conv2d_7_out_channels=696)
        # Stage 3 shortcut: strided 1x1 projection to 1392 channels.
        self.conv2d_95 = nn.Conv2d(in_channels=696,
                                   out_channels=1392,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module1_2 = Module1(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 module0_0_conv2d_0_in_channels=696,
                                 module0_0_conv2d_0_out_channels=1392,
                                 module0_0_conv2d_2_in_channels=1392,
                                 module0_0_conv2d_2_out_channels=1392,
                                 module0_0_conv2d_2_stride=(2, 2),
                                 module0_0_conv2d_2_group=6,
                                 module0_0_avgpool2d_4_kernel_size=(14, 14),
                                 module0_0_conv2d_5_in_channels=1392,
                                 module0_0_conv2d_5_out_channels=174,
                                 module0_0_conv2d_7_in_channels=174,
                                 module0_0_conv2d_7_out_channels=1392)
        self.relu_108 = nn.ReLU()
        self.module4_1 = Module4(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 conv2d_3_in_channels=1392,
                                 conv2d_3_out_channels=1392,
                                 conv2d_6_in_channels=1392,
                                 conv2d_6_out_channels=1392,
                                 conv2d_9_in_channels=1392,
                                 conv2d_9_out_channels=1392,
                                 module0_0_conv2d_0_in_channels=1392,
                                 module0_0_conv2d_0_out_channels=1392,
                                 module0_0_conv2d_2_in_channels=1392,
                                 module0_0_conv2d_2_out_channels=1392,
                                 module0_0_conv2d_2_stride=(1, 1),
                                 module0_0_conv2d_2_group=6,
                                 module0_0_avgpool2d_4_kernel_size=(14, 14),
                                 module0_0_conv2d_5_in_channels=1392,
                                 module0_0_conv2d_5_out_channels=348,
                                 module0_0_conv2d_7_in_channels=348,
                                 module0_0_conv2d_7_out_channels=1392,
                                 module0_1_conv2d_0_in_channels=1392,
                                 module0_1_conv2d_0_out_channels=1392,
                                 module0_1_conv2d_2_in_channels=1392,
                                 module0_1_conv2d_2_out_channels=1392,
                                 module0_1_conv2d_2_stride=(1, 1),
                                 module0_1_conv2d_2_group=6,
                                 module0_1_avgpool2d_4_kernel_size=(14, 14),
                                 module0_1_conv2d_5_in_channels=1392,
                                 module0_1_conv2d_5_out_channels=348,
                                 module0_1_conv2d_7_in_channels=348,
                                 module0_1_conv2d_7_out_channels=1392,
                                 module0_2_conv2d_0_in_channels=1392,
                                 module0_2_conv2d_0_out_channels=1392,
                                 module0_2_conv2d_2_in_channels=1392,
                                 module0_2_conv2d_2_out_channels=1392,
                                 module0_2_conv2d_2_stride=(1, 1),
                                 module0_2_conv2d_2_group=6,
                                 module0_2_avgpool2d_4_kernel_size=(14, 14),
                                 module0_2_conv2d_5_in_channels=1392,
                                 module0_2_conv2d_5_out_channels=348,
                                 module0_2_conv2d_7_in_channels=348,
                                 module0_2_conv2d_7_out_channels=1392,
                                 module0_3_conv2d_0_in_channels=1392,
                                 module0_3_conv2d_0_out_channels=1392,
                                 module0_3_conv2d_2_in_channels=1392,
                                 module0_3_conv2d_2_out_channels=1392,
                                 module0_3_conv2d_2_stride=(1, 1),
                                 module0_3_conv2d_2_group=6,
                                 module0_3_avgpool2d_4_kernel_size=(14, 14),
                                 module0_3_conv2d_5_in_channels=1392,
                                 module0_3_conv2d_5_out_channels=348,
                                 module0_3_conv2d_7_in_channels=348,
                                 module0_3_conv2d_7_out_channels=1392)
        self.module4_2 = Module4(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 conv2d_3_in_channels=1392,
                                 conv2d_3_out_channels=1392,
                                 conv2d_6_in_channels=1392,
                                 conv2d_6_out_channels=1392,
                                 conv2d_9_in_channels=1392,
                                 conv2d_9_out_channels=1392,
                                 module0_0_conv2d_0_in_channels=1392,
                                 module0_0_conv2d_0_out_channels=1392,
                                 module0_0_conv2d_2_in_channels=1392,
                                 module0_0_conv2d_2_out_channels=1392,
                                 module0_0_conv2d_2_stride=(1, 1),
                                 module0_0_conv2d_2_group=6,
                                 module0_0_avgpool2d_4_kernel_size=(14, 14),
                                 module0_0_conv2d_5_in_channels=1392,
                                 module0_0_conv2d_5_out_channels=348,
                                 module0_0_conv2d_7_in_channels=348,
                                 module0_0_conv2d_7_out_channels=1392,
                                 module0_1_conv2d_0_in_channels=1392,
                                 module0_1_conv2d_0_out_channels=1392,
                                 module0_1_conv2d_2_in_channels=1392,
                                 module0_1_conv2d_2_out_channels=1392,
                                 module0_1_conv2d_2_stride=(1, 1),
                                 module0_1_conv2d_2_group=6,
                                 module0_1_avgpool2d_4_kernel_size=(14, 14),
                                 module0_1_conv2d_5_in_channels=1392,
                                 module0_1_conv2d_5_out_channels=348,
                                 module0_1_conv2d_7_in_channels=348,
                                 module0_1_conv2d_7_out_channels=1392,
                                 module0_2_conv2d_0_in_channels=1392,
                                 module0_2_conv2d_0_out_channels=1392,
                                 module0_2_conv2d_2_in_channels=1392,
                                 module0_2_conv2d_2_out_channels=1392,
                                 module0_2_conv2d_2_stride=(1, 1),
                                 module0_2_conv2d_2_group=6,
                                 module0_2_avgpool2d_4_kernel_size=(14, 14),
                                 module0_2_conv2d_5_in_channels=1392,
                                 module0_2_conv2d_5_out_channels=348,
                                 module0_2_conv2d_7_in_channels=348,
                                 module0_2_conv2d_7_out_channels=1392,
                                 module0_3_conv2d_0_in_channels=1392,
                                 module0_3_conv2d_0_out_channels=1392,
                                 module0_3_conv2d_2_in_channels=1392,
                                 module0_3_conv2d_2_out_channels=1392,
                                 module0_3_conv2d_2_stride=(1, 1),
                                 module0_3_conv2d_2_group=6,
                                 module0_3_avgpool2d_4_kernel_size=(14, 14),
                                 module0_3_conv2d_5_in_channels=1392,
                                 module0_3_conv2d_5_out_channels=348,
                                 module0_3_conv2d_7_in_channels=348,
                                 module0_3_conv2d_7_out_channels=1392)
        self.module3_0 = Module3()
        self.module2_1 = Module2(conv2d_0_in_channels=1392,
                                 conv2d_0_out_channels=1392,
                                 module0_0_conv2d_0_in_channels=1392,
                                 module0_0_conv2d_0_out_channels=1392,
                                 module0_0_conv2d_2_in_channels=1392,
                                 module0_0_conv2d_2_out_channels=1392,
                                 module0_0_conv2d_2_stride=(1, 1),
                                 module0_0_conv2d_2_group=6,
                                 module0_0_avgpool2d_4_kernel_size=(14, 14),
                                 module0_0_conv2d_5_in_channels=1392,
                                 module0_0_conv2d_5_out_channels=348,
                                 module0_0_conv2d_7_in_channels=348,
                                 module0_0_conv2d_7_out_channels=1392)
        # Stage 4 shortcut: strided 1x1 projection to 3712 channels.
        self.conv2d_252 = nn.Conv2d(in_channels=1392,
                                    out_channels=3712,
                                    kernel_size=(1, 1),
                                    stride=(2, 2),
                                    padding=0,
                                    pad_mode="valid",
                                    dilation=(1, 1),
                                    group=1,
                                    has_bias=True)
        self.module1_3 = Module1(conv2d_0_in_channels=3712,
                                 conv2d_0_out_channels=3712,
                                 module0_0_conv2d_0_in_channels=1392,
                                 module0_0_conv2d_0_out_channels=3712,
                                 module0_0_conv2d_2_in_channels=3712,
                                 module0_0_conv2d_2_out_channels=3712,
                                 module0_0_conv2d_2_stride=(2, 2),
                                 module0_0_conv2d_2_group=16,
                                 module0_0_avgpool2d_4_kernel_size=(7, 7),
                                 module0_0_conv2d_5_in_channels=3712,
                                 module0_0_conv2d_5_out_channels=348,
                                 module0_0_conv2d_7_in_channels=348,
                                 module0_0_conv2d_7_out_channels=3712)
        self.relu_265 = nn.ReLU()
        # Classification head: global pool -> flatten -> 1000-class logits.
        self.avgpool2d_266 = nn.AvgPool2d(kernel_size=(7, 7))
        self.flatten_267 = nn.Flatten()
        self.dense_268 = nn.Dense(in_channels=3712, out_channels=1000, has_bias=True)

    def construct(self, input_1):
        """Forward pass: stem -> four stages -> pooled 1000-way logits."""
        opt_conv2d_0 = self.conv2d_0(input_1)
        opt_relu_1 = self.relu_1(opt_conv2d_0)
        # Stage 1: strided shortcut conv + strided Module1 branch.
        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
        module1_0_opt = self.module1_0(opt_relu_1)
        opt_add_14 = P.Add()(opt_conv2d_2, module1_0_opt)
        opt_relu_15 = self.relu_15(opt_add_14)
        module2_0_opt = self.module2_0(opt_relu_15)
        # Stage 2.
        opt_conv2d_29 = self.conv2d_29(module2_0_opt)
        module1_1_opt = self.module1_1(module2_0_opt)
        opt_add_41 = P.Add()(opt_conv2d_29, module1_1_opt)
        opt_relu_42 = self.relu_42(opt_add_41)
        module4_0_opt = self.module4_0(opt_relu_42)
        # Stage 3.
        opt_conv2d_95 = self.conv2d_95(module4_0_opt)
        module1_2_opt = self.module1_2(module4_0_opt)
        opt_add_107 = P.Add()(opt_conv2d_95, module1_2_opt)
        opt_relu_108 = self.relu_108(opt_add_107)
        module4_1_opt = self.module4_1(opt_relu_108)
        module4_2_opt = self.module4_2(module4_1_opt)
        module3_0_opt = self.module3_0(module4_2_opt)
        module2_1_opt = self.module2_1(module3_0_opt)
        # Stage 4.
        opt_conv2d_252 = self.conv2d_252(module2_1_opt)
        module1_3_opt = self.module1_3(module2_1_opt)
        opt_add_264 = P.Add()(opt_conv2d_252, module1_3_opt)
        opt_relu_265 = self.relu_265(opt_add_264)
        # Head.
        opt_avgpool2d_266 = self.avgpool2d_266(opt_relu_265)
        opt_flatten_267 = self.flatten_267(opt_avgpool2d_266)
        opt_dense_268 = self.dense_268(opt_flatten_267)
        return opt_dense_268
| [
"zhangxiaoxiao16@huawei.com"
] | zhangxiaoxiao16@huawei.com |
6e4e5e3c39abdfef03c473cadda68be2c7a10fa9 | 97072bdb023dd3916d0ced6aba1c98ec0893ee01 | /tests/test_user.py | 6c75c87d9a88a701e3f63fcc1efb7784b662cc2f | [
"MIT"
] | permissive | AnumAsif/my-blogs | ed814d0559a1d84e138a02b846d2a2b85aacfebd | 8dd6d8e9e84867582dad10265203d1219c00926c | refs/heads/master | 2020-04-23T01:51:07.902859 | 2019-02-19T12:18:42 | 2019-02-19T12:18:42 | 170,826,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | import unittest
from app.models import User
class TestUserModel(unittest.TestCase):
def setUp(self):
self.user = User(password="anum123")
def test_password_setter(self):
self.assertTrue(self.user.password_hash is not None)
def test_no_access_password(self):
with self.assertRaises(AttributeError):
self.user.password
def test_password_verification(self):
self.assertTrue(self.user.verify_password('anum123')) | [
"anum@cockar.com"
] | anum@cockar.com |
2c7b79f3a9dd178f469deb356a582fe9faea12ba | a9de160510afc4f42dfb2ffbcbd46bb11151946d | /sum_of_cubes.py | d46037f985dc5a00b7a97faaa25dc41592b709d8 | [] | no_license | Zahidsqldba07/Pyground | 2baa5febfde992a366ac5ac20df846c321c80d90 | e959c9d0a19245dca269786665aec1a77cac4cee | refs/heads/main | 2023-09-02T23:40:07.977111 | 2021-11-10T18:15:40 | 2021-11-10T18:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # sum of squares of first n natural numbers
n = int(input("enter the range:"))
sum = 0
for num in range(1,n+1):
a = num**3
sum += a
print(sum)
| [
"44092199+sahil639@users.noreply.github.com"
] | 44092199+sahil639@users.noreply.github.com |
45750ed0c77b767fb0b24c89a098b91d4774a05e | e37132e2bccc4af35d07a8286f1161233f55f0a3 | /csvfile.py | 3965789ece21965520bb95affe6200515e48c8bb | [] | no_license | mannesiddhardha/python | 88fb0be67b547c1bb0d2ea6270641706a96ce064 | 90c6466ee675d28327a91c32b369028e5b5b95db | refs/heads/master | 2023-04-19T10:53:56.662047 | 2021-05-08T21:28:35 | 2021-05-08T21:28:35 | 99,147,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import csv
with open("/Users/mannesiddhardha/Desktop/test.csv",'r') as fr:
cr = csv.DictReader(fr)
for line in cr:
print(line['lastname'],line['email'])
# csv.reader(fr, delimiter=',')
# csv.reader(fr)
# we can get output in list format, parse with the index's
#csv.DictReader(fr)
#output in dictionary format, parse with the keys
# 2nd column 2nd row element in a csv file
with open('/Users/mannesiddhardha/Desktop/test.csv','r') as fr:
cr = csv.reader(fr)
# indexes parse the columns first
result = [line[2] for line in cr]
# once you have the list go to the specific row number you want
print result[2]
| [
"noreply@github.com"
] | mannesiddhardha.noreply@github.com |
88e0919771e070e602f208c9d6bbeae0dab97897 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/django/contrib/databrowse/__init__.py | 00928c97e129815852bcfa60bb1f0d6611f9a0ce | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/django/contrib/databrowse/__init__.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
959839dfd64481825899d18dbdd5495eb9ff46eb | f113a4f55b8a5f60f3b2e88657772941672a399c | /app/plugin/biu/do/unfollow.py | bffece69aca2ee0c4ce589c432bbb05662bcb217 | [
"MIT"
] | permissive | isouland/PixivBiu | 46e53d769cf4c64f9ec4551c7f5ddc09bc0c8ddb | 181cb2e574d54757bcfa60c8bae2c1d69e7d8189 | refs/heads/master | 2022-04-16T07:51:16.806545 | 2020-02-25T09:48:50 | 2020-02-25T09:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | # pylint: disable=relative-beyond-top-level
from ....platform import CMDProcessor
@CMDProcessor.plugin_register("api/biu/do/unfollow")
class doUnFollow(object):
def __init__(self, MOD):
self.MOD = MOD
def pRun(self, cmd):
if self.MOD.biu.apiType != "public":
return {"code": 0, "msg": "only support public api"}
try:
args = self.MOD.args.getArgs(
"unfollow",
[
"userID",
(
"restrict=%s"
% self.MOD.biu.sets["biu"]["common"]["defaultActionType"]
),
],
)
except:
return {"code": 0, "msg": "missing parameters"}
return {
"code": 1,
"msg": {
"way": "do",
"args": args,
"rst": self.unFollow(args["ops"].copy(), args["fun"].copy()),
},
}
def unFollow(self, opsArg, funArg):
self.MOD.args.argsPurer(
funArg, {"userID": "user_ids", "restrict": "publicity"}
)
r = self.MOD.biu.api.me_favorite_users_unfollow(**funArg)
return {"api": "public", "data": r}
| [
"txperl@gmail.com"
] | txperl@gmail.com |
0b5d2010d4ff6dc7c4f0e7f346bd22f223a72f72 | d0f3b01fec0efddf5d8e5585897b14bc3aa76ab7 | /download_data.py | 020ce836dba2fe1119b01845a3822300d48403dc | [] | no_license | vlanabere/Paper_GRAPE | 31b7f5adc47c8b306ae7fdf5ed32bfe34adb366e | 31cfb2be1a36aa6ba50420707ed07e7aab0e139d | refs/heads/master | 2023-02-25T12:38:09.751925 | 2021-01-31T20:03:20 | 2021-01-31T20:03:20 | 334,739,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import numpy as np
from datetime import datetime, timedelta
def dst(fecha):
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Fecha en formato yyyymm
url = 'http://wdc.kugi.kyoto-u.ac.jp/dst_realtime/'+fecha+'/index.html'
print(url)
html = urllib.request.urlopen(url, context=ctx).read()
#Abro el html
soup = BeautifulSoup(html, 'html.parser')
#Me fije en que tag esta la data que quiero, se llama pre
table = soup.find("pre").contents
#separo por /n (son espacios que hay en determinadas horas), elimino los strings que no me interesan
u=table[2].split('\n')[7:-1]
str_list = list(filter(None, u))
np.savetxt("dst.txt", str_list, delimiter=",", fmt='%s')
# Separo por espacios
new_list=list()
for s in str_list:
tmp = map(''.join, zip(*[iter(s[3:35])]*4))
new_list.extend(tmp)
new_list.append(s[35:40])
tmp = map(''.join, zip(*[iter(s[40:69])]*4))
new_list.extend(tmp)
new_list.append(s[69:74])
tmp = map(''.join, zip(*[iter(s[74:])]*4))
new_list.extend(tmp)
new_list.append(s[-4:])
dst_data = np.asarray(new_list,dtype=np.float32)
escala_temporal=[]
date_start = datetime.strptime(fecha, '%Y%m')
date_end = date_start + timedelta(days=31)
while date_start < date_end:
escala_temporal.append(date_start)
date_start +=timedelta(hours=1)
return escala_temporal, dst_data
def kp(file):
myfile = open(file)
data_full = []
for line in myfile:
data = line.strip('\n')
data_full.append(data)
return data_full
| [
"vanina@at.fcen.uba.ar"
] | vanina@at.fcen.uba.ar |
809cb2ce5e45fa69515125514969156d1cc2b361 | df1ad0d061f7982edd0d5e159a1abc31daa8fd4c | /MapEngine/urls.py | 3897fe23f364a999aba4acff7fe86801c740d287 | [
"Apache-2.0"
] | permissive | todor943/mapEngine | f6c056ca1b2fcf6207d5a1bf2b5908f062fff353 | 560c4e9bee2ef20e988124030db801337d8722f1 | refs/heads/master | 2020-12-24T18:55:13.667780 | 2017-11-06T19:54:04 | 2017-11-06T19:54:04 | 57,469,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | """MapEngine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^/?', include('MapApp.urls')),
url(r'^api/?', include('MapApi.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| [
"tmarkov@sevone.com"
] | tmarkov@sevone.com |
6416d21e330f6923f19a06e51308eeb8b9f4168b | f26d67e3e9f8b90e5d6243279a1c2ce87fa41d46 | /tests/api/test_prodstats.py | c06973ae4a39c46587d14ccdcf139af25afd3c4a | [
"MIT"
] | permissive | OCB-DS/prodstats | cf554e3abee651463e9f81606d4b633f464658a7 | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | refs/heads/master | 2022-11-25T15:30:06.988683 | 2020-08-02T16:08:05 | 2020-08-02T16:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,722 | py | # import logging
# import pandas as pd
# import pytest
# import starlette.status as codes
# from db.models import ProdStat as Model
# from tests.utils import rand_str
# logger = logging.getLogger(__name__)
# pytestmark = pytest.mark.asyncio
# @pytest.fixture(scope="session")
# def prodstat_records(json_fixture):
# yield json_fixture("prodstats.json")
# @pytest.fixture
# def prodstat_df(prodstat_records):
# yield pd.DataFrame(prodstat_records).set_index(["api10", "prod_date"])
# @pytest.fixture(autouse=True)
# async def seed_prodstats(bind, prodstat_records):
# await Model.bulk_insert(prodstat_records)
# class TestPlaceEndpoint:
# path: str = "/api/v1/prodstats"
# async def test_create_prodstat(self, client):
# prodstat_name = "test"
# response = await client.post(self.path, json=[{"name": prodstat_name}])
# assert response.status_code == codes.HTTP_202_ACCEPTED
# async def test_list_prodstats(self, client):
# expected_record_count = 25
# response = await client.get(self.path)
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert len(data) == expected_record_count
# assert response.links["next"] is not None
# async def test_get_prodstat(self, client):
# id = 20
# response = await client.get(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == 20
# async def test_update_exising_prodstat(self, client):
# id = 10
# value = rand_str(length=8)
# response = await client.put(f"{self.path}/{id}", json={"state": value})
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == id
# assert data["state"] == value
# async def test_update_prodstat_not_found(self, client):
# id = 99999
# value = rand_str(length=8)
# response = await client.put(f"{self.path}/{id}", json={"state": value})
# assert response.status_code == codes.HTTP_404_NOT_FOUND
# async def test_delete_existing_prodstat(self, client):
# id = 20
# response = await client.delete(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_200_OK
# data = response.json()
# assert data["id"] == id
# async def test_delete_prodstat_not_found(self, client):
# id = 99999
# response = await client.delete(f"{self.path}/{id}")
# assert response.status_code == codes.HTTP_404_NOT_FOUND
# data = response.json()
# assert data["detail"] == "prodstat not found"
| [
"brocklfriedrich@gmail.com"
] | brocklfriedrich@gmail.com |
c4f6240e36dda575fc9260c1c97b59f86c3a335a | 251892cf89523ded40f53fdd95812a4bd2290e67 | /dropdown.py | 233a5f72051b7e68b6ef7beb2b4134f3a242cd7f | [] | no_license | daianalfonso/AutomationFrameworkPython | 0e268917bb2f0d30d5a4998ff2e9abfca745efcf | 733cd7c8365112cd0dbf017afe0291eeed5803bc | refs/heads/master | 2022-11-08T19:55:35.047074 | 2020-06-26T00:17:20 | 2020-06-26T00:17:20 | 275,040,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
driver = webdriver.Chrome ('chromedriver.exe')
driver.get ('http://newtours.demoaut.com/')
time.sleep(2)
driver.find_element_by_link_text('REGISTER').click()
countryDropDown = Select (driver.find_element_by_name('country'))
countryDropDown.select_by_index(5)
countryDropDown.select_by_value('11')
countryDropDown.select_by_visible_text('CONGO')
time.sleep(4)
driver.quit()
| [
"alfonsodaiana92@gmail.com"
] | alfonsodaiana92@gmail.com |
7589906133199bddaf9c5d9c0f88472ab35605fa | 8bb055b9ce83955a9e7f3a8cea57453a53bb2d76 | /hashing/set_matrix_zeroes.py | c4cfce06ba1cfce17148c26c1088c6e5124a0576 | [] | no_license | kmaheshwari/ds_and_algo | 6a127c483c8df381162f3ec978962171341c6ec4 | a693cc53ac45a1af253b4e5f97b021f215948d39 | refs/heads/master | 2020-09-16T04:11:26.627345 | 2019-11-23T20:32:09 | 2019-11-23T20:32:09 | 223,649,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | class Solution:
# Set Matrix Zeros
# Given a matrix, A of size M x N of 0s and 1s. If an element is 0, set its entire row and column to 0. Note: This will be evaluated on the extra memory used. Try to minimize the space and time complexity.
# Input Format:
# The first and the only argument of input contains a 2-d integer matrix, A, of size M x N.
# Output Format:
# Return a 2-d matrix that satisfies the given conditions.
# Constraints:
# 1 <= N, M <= 1000
# 0 <= A[i][j] <= 1
# Examples:
# Input 1:
# [ [1, 0, 1],
# [1, 1, 1],
# [1, 1, 1] ]
# Output 1:
# [ [0, 0, 0],
# [1, 0, 1],
# [1, 0, 1] ]
# Input 2:
# [ [1, 0, 1],
# [1, 1, 1],
# [1, 0, 1] ]
# Output 2:
# [ [0, 0, 0],
# [1, 0, 1],
# [0, 0, 0] ]
# @param A : list of list of integers
# @return the same list modified
def setZeroes(self, A):
m = len(A)
n = len(A[0])
set_1_row = False
set_1_column = False
for i in range(0, m):
for j in range(0, n):
if (i == 0 and A[i][j] == 0):
set_1_row = True
if (j == 0 and A[i][j] == 0):
set_1_column = True
if A[i][j] == 0:
A[i][0] = 0
A[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if A[i][0] == 0 or A[0][j] == 0:
A[i][j] = 0
if set_1_row == True:
for j in range(1, n):
A[0][j] = 0
if set_1_column == True:
for i in range(1, m):
A[i][0] = 0
return A
| [
"maheshwari.kajol@gmail.com"
] | maheshwari.kajol@gmail.com |
e9c22973833e68e080736b6dc8759c782f005479 | 78eac5452e96a19ebe5ca8d2c8bd396439bf1437 | /mysite/blog/migrations/0001_initial.py | 9d6345141eed3ba327e97fd68815d5492679ebee | [] | no_license | wucarek/my-first-blog | 892799d89e610078d7e8c7f07a5c23802d784d7f | c15b53d6cf4829f859d9431cbba54695d4322637 | refs/heads/master | 2023-02-10T07:00:14.744723 | 2021-01-11T15:25:12 | 2021-01-11T15:25:12 | 328,668,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.2.17 on 2021-01-11 14:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"a.szczepek@wp.pl"
] | a.szczepek@wp.pl |
dbbaa07f9a98c3e9e22c7689a290eb5bead8165e | cbe9a0a8310637760bbeb7856795effa2ab23598 | /node_modules/uws/build/config.gypi | 78d42950443a6ba7a2a5184af936c77cec71e656 | [
"Zlib"
] | permissive | sruan6/portfolio_v2 | 309f6e36f535de9d6297bb297aaa010110d42e6e | 96ca5e2df47c6e8bb6610caa34dc3f65efdd681d | refs/heads/master | 2023-03-04T22:45:43.287555 | 2018-03-28T21:49:09 | 2018-03-28T21:49:09 | 127,062,387 | 0 | 0 | null | 2022-12-02T00:48:59 | 2018-03-28T00:41:31 | HTML | UTF-8 | Python | false | false | 4,879 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/8.9.1",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "9.0",
"nodedir": "/Users/Stephen/.node-gyp/8.9.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/Stephen/.npm-init.js",
"userconfig": "/Users/Stephen/.npmrc",
"cidr": "",
"node_version": "8.9.1",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/Stephen/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/5.6.0 node/v8.9.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/m7/789bh0610t1bkzygywr4wqsr0000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
| [
"Stephen@Stephens-MacBook-Pro.local"
] | Stephen@Stephens-MacBook-Pro.local |
292ee9ea10c86bc5da3528223110724fdfec5104 | b43d4b2c7803aa97d11711a0405d265089c39aed | /src/google/cloud/ndb/_remote.py | fea024a5fd30856812b3422f03934091c5f2c631 | [
"Apache-2.0"
] | permissive | takashi8/python-ndb | a3b2b480cd490c7de6fd2683ab961d4449742e78 | f662c16a518efc2fb65598932e7c3524ecec3957 | refs/heads/master | 2020-06-01T17:21:42.884858 | 2019-06-24T22:29:52 | 2019-06-24T22:29:52 | 185,898,553 | 0 | 0 | Apache-2.0 | 2019-05-10T01:41:54 | 2019-05-10T01:41:53 | null | UTF-8 | Python | false | false | 1,879 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for information about remote calls."""
# In its own module to avoid circular import between _datastore_api and
# tasklets modules.
class RemoteCall:
    """Wrapper around a future produced by a remote (out-of-loop) call.

    Primarily wraps futures returned by gRPC, carrying a human readable
    description of the call to make debugging easier.  It can be used for
    anything that returns a future for work running outside of our own
    event loop.

    Arguments:
        future (Union[grpc.Future, tasklets.Future]): The future handed back
            from initiating the call.
        info (str): Helpful human readable string about the call.  Returned
            verbatim by :meth:`__repr__`.
    """

    def __init__(self, future, info):
        self.future = future
        self.info = info

    def __repr__(self):
        return self.info

    def add_done_callback(self, callback):
        """Delegate to ``add_done_callback`` on :attr:`future`."""
        return self.future.add_done_callback(callback)

    def exception(self):
        """Delegate to ``exception`` on :attr:`future`."""
        return self.future.exception()

    def result(self):
        """Delegate to ``result`` on :attr:`future`."""
        return self.future.result()
| [
"noreply@github.com"
] | takashi8.noreply@github.com |
8e3b3c81c0c614f310d3cacfaea2b523e16773bf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_344/ch46_2019_03_19_20_17_36_654772.py | 5d9a674902235b456d5687c756b4218f596434d0 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | lista_palavras = []
palavra = input('Escreva uma palavra: ')
while palavra != 'fim':
    # Fixed name: the original appended to "lista_palvras" (typo), which
    # raised NameError because only "lista_palavras" exists.
    lista_palavras.append(palavra)
    # Print the collected word if it starts with 'a'.  startswith() is safe
    # for an empty string, and '==' replaces the invalid assignment '='
    # that made the original a SyntaxError.  Checking before reading the
    # next word also covers the first word, which the original skipped.
    if palavra.startswith('a'):
        print(palavra)
    palavra = input('Escreva outra palavra')
| [
"you@example.com"
] | you@example.com |
4a091b6d0f3d4382e67c122acfa7fa4c68c9ce22 | 3cd5fe995670963e5e94918ba9f1796e9e7cb73f | /2.7/ipython/profile_default/ipython_config.py | a6a72a3e3e16cbe3fa84b7e6893e98aa0bbcbb58 | [] | no_license | GrahamDumpleton-abandoned/s2i-ipython-notebook | 1e0a7ff4c893b5b743bd250535cedf4b0ed0988f | 33246956d6ba32384f678d4803148ac964b0befe | refs/heads/master | 2021-05-31T04:53:03.679233 | 2016-04-02T09:28:01 | 2016-04-02T09:28:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,830 | py | # Configuration file for ipython.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = traitlets.Undefined
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = traitlets.Undefined
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = traitlets.Undefined
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# Set the log level by value or name.
# c.Application.log_level = 30
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# BaseIPythonApplication configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = u''
# Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = u''
# The IPython profile to use.
# c.BaseIPythonApplication.profile = u'default'
# Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
#------------------------------------------------------------------------------
# InteractiveShell configuration
#------------------------------------------------------------------------------
# An enhanced, interactive shell for Python.
#
# c.InteractiveShell.ipython_dir = ''
# Set the color scheme (NoColor, Linux, or LightBG).
# c.InteractiveShell.colors = 'Linux'
#
# c.InteractiveShell.debug = False
# The name of the logfile to use.
# c.InteractiveShell.logfile = ''
#
# c.InteractiveShell.object_info_string_level = 0
#
# c.InteractiveShell.separate_out = ''
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.InteractiveShell.cache_size = 1000
#
# c.InteractiveShell.quiet = False
#
# c.InteractiveShell.xmode = 'Context'
# **Deprecated**
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.InteractiveShell.deep_reload = False
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
#
# c.InteractiveShell.readline_remove_delims = '-/~'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
# Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = 'Python 2.7.10 (default, Nov 15 2015, 22:51:08) \nType "copyright", "credits" or "license" for more information.\n\nIPython 4.0.1 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
# Save multi-line entries as one entry in readline history
# c.InteractiveShell.multiline_history = True
#
# c.InteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in_template
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
# Deprecated, use PromptManager.in2_template
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
# Deprecated, use PromptManager.out_template
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
# Deprecated, use PromptManager.justify
# c.InteractiveShell.prompts_pad_left = True
#
# c.InteractiveShell.separate_out2 = ''
# Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = traitlets.Undefined
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
# Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
#
# c.InteractiveShell.history_length = 10000
# Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
# The number of saved history entries to be loaded into the readline buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
#
# c.InteractiveShell.readline_parse_and_bind = traitlets.Undefined
#
# c.InteractiveShell.wildcards_case_sensitive = True
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
# Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
#
# c.InteractiveShell.readline_use = True
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
#------------------------------------------------------------------------------
# HistoryAccessorBase configuration
#------------------------------------------------------------------------------
# An abstract class for History Accessors
#------------------------------------------------------------------------------
# HistoryAccessor configuration
#------------------------------------------------------------------------------
# Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = traitlets.Undefined
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryAccessor.hist_file = u''
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# BaseFormatter configuration
#------------------------------------------------------------------------------
# A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
#
# c.BaseFormatter.singleton_printers = traitlets.Undefined
#
# c.BaseFormatter.type_printers = traitlets.Undefined
#
# c.BaseFormatter.deferred_printers = traitlets.Undefined
#
# c.BaseFormatter.enabled = True
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.newline = '\n'
#------------------------------------------------------------------------------
# Completer configuration
#------------------------------------------------------------------------------
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
#------------------------------------------------------------------------------
# Magics configuration
#------------------------------------------------------------------------------
# Base class for implementing magic functions.
#
# Shell functions which can be reached as %function_name. All magic functions
# should accept a string, which they can parse for their own needs. This can
# make some functions easier to type, eg `%cd ../` vs. `%cd("../")`
#
# Classes providing magic functions need to subclass this class, and they MUST:
#
# - Use the method decorators `@line_magic` and `@cell_magic` to decorate
# individual methods as magic functions, AND
#
# - Use the class decorator `@magics_class` to ensure that the magic
# methods are properly registered at the instance level upon instance
# initialization.
#
# See :mod:`magic_functions` for examples of actual implementation classes.
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = traitlets.Undefined
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = traitlets.Undefined
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| [
"Graham.Dumpleton@gmail.com"
] | Graham.Dumpleton@gmail.com |
33b4d2fa38cb404e2645144a6a3fee4a220b4933 | b697bd0069363f858704cddaae3a6cadd1370df9 | /tests/test_models.py | bae063459f23af447d445e42cb6b1605eb4d92bf | [] | no_license | WillChilds-Klein/bulb-api | c0c140198e7f02bb2d4217e9339cec3ef5675e51 | b8dba15cd7825d182fda75a617cf2e02f62dccee | refs/heads/master | 2021-03-27T15:57:15.664070 | 2017-07-19T04:07:31 | 2017-07-20T02:19:41 | 87,000,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from bulb_api.models import BulbModel
def test_get_hash_key_name():
    """Placeholder test for the model hash-key name; not yet implemented."""
def test_get_unused_uuid():
    """Placeholder test for unused-UUID generation; not yet implemented."""
def test_to_dict():
    """Placeholder test for model-to-dict conversion; not yet implemented."""
def test_update_from_dict():
    """Placeholder test for updating a model from a dict; not yet implemented."""
# TODO: create little tool to test for isomorphisms between pynamo models
# attr types and swagger obj attr types.
| [
"willck93@gmail.com"
] | willck93@gmail.com |
7e69e6b230ecacd463082cc3cf767002e2ed1e73 | 834c37d22d684ea8630ba51be0e038cabf82249b | /scpTestImage.py | d93bc9b301fcbe296d3dc7ce8ffa0e5441b564f0 | [] | no_license | StevenDCK/scripts | 93860e72e12f2947d5b24c574c39c431332a9f9b | b2400308f63b5e9e99ccb8a14c2d745e613f2dd2 | refs/heads/master | 2022-04-19T16:34:55.275819 | 2020-04-19T14:13:39 | 2020-04-19T14:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding:utf8 -*-
import os
import pdb
#pdb.set_trace()
# Text file listing the image paths to fetch, one path per line.
g_NameSet = 'testPerson.txt'
# NOTE(review): presumably the source directory on the remote host; it is
# not referenced anywhere in this script -- confirm before removing.
g_SrcImagePath = '/home1/wiwide_data/train_image/person/JPEGImages'
# Local destination directory the images are copied into via scp.
g_DstImagePath = '/home1/wiwide_data/train_image/person/JPEGImages/'
if __name__=="__main__":
    import subprocess  # Local import keeps this fix self-contained.

    # Read the list of file paths to fetch; the with-statement guarantees
    # the handle is closed (the original left it open on a read error).
    with open(g_NameSet, 'r') as fin:
        files = fin.readlines()

    for i, name in enumerate(files, start=1):
        # rstrip('\n') strips the trailing newline but, unlike the original
        # slice f[0:-1], does not eat the last character of a final line
        # that has no newline.
        name = name.rstrip('\n')
        # Skip blank lines and files that are already present locally.
        if not name or os.path.exists(name):
            continue
        # Run scp with an argument list (no shell), so unusual characters
        # in file names cannot be interpreted by a shell.
        subprocess.call(['scp', 'dbs@172.16.105.172:' + name, g_DstImagePath])
        if i % 128 == 0:
            # Append a progress marker every 128 lines processed (replaces
            # the original shell-out: os.system('echo N >> scplogtest.txt')).
            with open('scplogtest.txt', 'a') as log:
                log.write(str(i) + '\n')
"eco.deng@wiwide.com"
] | eco.deng@wiwide.com |
af40ec2a3216a96686e36405aa020664d8c3bbbb | ea03a006c60e67653753cf6cc4d5363c71574973 | /lab1/accounts.py | e8119628b69412e745c22c68e114af1d0c9e2720 | [] | no_license | nmhoangg2000/lab-c4e24 | af2d20fccbc3bcbb6e3c61f4edd8c7dc1bd35ff1 | 7086fbbb34b4606bdfadfd6d7b15a1c0fad8f0a3 | refs/heads/master | 2020-04-10T13:48:43.612546 | 2018-12-24T10:49:24 | 2018-12-24T10:49:24 | 161,059,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | from pymongo import MongoClient
# Connection string for the mLab-hosted MongoDB instance.
# NOTE(review): credentials are embedded in this URI; consider moving them
# into an environment variable instead of source control.
uri = "mongodb://admin:admin1@ds227654.mlab.com:27654/c4e24-lab1"
client = MongoClient(uri)
# get_database() with no argument uses the database named in the URI path.
db = client.get_database()
# Collection that receives the account documents inserted below.
post_collection = db["accounts"]
accounts = [
{
"username": "taoooonguuu",
"email":"nmhoangxxi2000@gmail.com",
"phone":"0123456789",
"password":"hoang2510",
"yob":"2000"
},
{
"username": "taooonguuu",
"email":"nmhoanxxi2000@gmail.com",
"phone":"012356789",
"password":"hoang2510",
"yob":"2001"
},
{
"username": "taoooonuuu",
"email":"nmhoangxi2000@gmail.com",
"phone":"012345789",
"password":"hoang2510",
"yob":"2001"
}
]
# Insert each account document individually.  The original passed the whole
# `accounts` list to insert_one() on every iteration; insert_one() expects a
# single document, so that call fails.  (insert_many(accounts) would also
# work as a single round trip.)
for account in accounts:
    post_collection.insert_one(account)
client.close() | [
"square2000nguyen@gmail.com"
] | square2000nguyen@gmail.com |
a3d794c9cc14c2302fce4075d20c37c5426c83ab | da46cfffc8c234b0c8f6db53a073c01ce9f41a43 | /hummingbird/ml/operator_converters/_pipeline_implementations.py | 69516ca0ef6e18aa306d5db5b2e0fea31ae37db9 | [
"MIT"
] | permissive | amrofi/hummingbird | 53a75d97c0fdf03b821f5cf0cd3fae23313a6b0d | 28e13a6391e617205dc1518bf40bec082f3250b8 | refs/heads/master | 2022-12-26T22:16:41.676025 | 2020-10-06T00:21:08 | 2020-10-06T00:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Converters for operators necessary for supporting scikit-learn Pipelines.
"""
import numpy as np
from onnxconverter_common.registration import register_converter
import torch
from ._base_operator import BaseOperator
class Concat(BaseOperator, torch.nn.Module):
    """Torch operator that merges the outputs of parallel pipeline steps.

    Inputs that already have a feature axis are concatenated along
    dimension 1; plain 1-D inputs are stacked so each becomes a column.
    """

    def __init__(self):
        super(Concat, self).__init__()

    def forward(self, *x):
        # Shape of the first input decides the merge strategy for all of them.
        if len(x[0].shape) > 1:
            return torch.cat(x, dim=1)
        return torch.stack(x, dim=1)
"noreply@github.com"
] | amrofi.noreply@github.com |
b66cc94e267439ca20172075c33b255396a54cd2 | fe0836d668808c58d51b23bc05a43d508c202b4c | /algorithms/__init__.py | 7a0c714db963ae4db88c29c9fa8cbea2e61730be | [] | no_license | dgchachlakis/Novel-Algorithms-for-Lp-quasi-norm-Principal-Component-Analysis | d6c530ee0ef480aee3cb136ba1c4d92bfb644a9b | 98e0826af8cc167704bb4b978c23cf93a8d26227 | refs/heads/main | 2023-02-24T16:28:15.839404 | 2021-01-15T02:55:07 | 2021-01-15T02:55:07 | 327,658,146 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from .bitflipping import bitflipping
from .bitflipping_deflation import bitflipping_deflation | [
"dimitris@mail.rit.edu"
] | dimitris@mail.rit.edu |
f29fab386e56616de8c34b734ef2137ce87edd4a | a08c41f47cd59dace0a8732b47de2d9f03092fd1 | /pyterprise/user.py | 1a5d39edeb6f58fbfd52ecd5dab1de0600464913 | [
"MIT"
] | permissive | dhurley94/terraform-enterprise-api-python-client | 204284722a0c47964c4d6c564355634d9f012d6e | 19c8b6014761790298259a8e2c32907919766b61 | refs/heads/master | 2020-12-23T09:52:30.954044 | 2020-02-03T02:53:37 | 2020-02-03T02:53:37 | 237,116,198 | 0 | 0 | null | 2020-01-30T01:20:06 | 2020-01-30T01:20:05 | null | UTF-8 | Python | false | false | 509 | py |
class User(object):
def __init__(self, user):
self.id = user.id
attributes = user.attributes
self.username = attributes.username
self.is_service_account = attributes.is_service_account
self.avatar_url = attributes.avatar_url
self.v2_only = attributes.v2_only
self.permissions = attributes.permissions
relationships = user.relationships
self.authentication_tokens = relationships.authentication_tokens
self.links = user.links | [
"david@rizse.iom"
] | david@rizse.iom |
3641978af95904682d38d008c2309cf8e3703e1c | d3f8c787ed6347639be41899a10be1ffde45b87d | /week 5/coordinate.py | 1451df55816e23d4fb549637df6dd8c25512ccff | [] | no_license | sanjeevr11/ZeMoSo | c12e1365b23fce9704d645e436ab070a389b472e | 15e536320ac939a931489810f368b0f4c6457c33 | refs/heads/master | 2021-05-11T09:42:43.473993 | 2018-02-15T07:48:52 | 2018-02-15T07:48:52 | 118,084,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | class Coordinate(object):
def __init__(self,x,y):
self.x = x
self.y = y
def getX(self):
# Getter method for a Coordinate object's x coordinate.
# Getter methods are better practice than just accessing an attribute directly
return self.x
def getY(self):
# Getter method for a Coordinate object's y coordinate
return self.y
def __str__(self):
return '<' + str(self.getX()) + ',' + str(self.getY()) + '>'
def __eq__(self, other):
if other.x == self.x and other.y == self.y:
return True
else:
return False
def __repr__(self):
return "Coordinate"+ str((self.x, self.y))
| [
"noreply@github.com"
] | sanjeevr11.noreply@github.com |
465a949f22f4054272cd2405ab347fe896601cd4 | 39989918edbff18ff204d6789546229438e3d864 | /notes/41 - PyGame/bare bones.py | 4d53bb279d5502b1822f9cc32e51a6f0345d895b | [
"MIT"
] | permissive | huynhkGW/ICS-Python-Notes | cb79001cece3b872fcb02968041bf2923ada0ea7 | 22bf3dc2196bdc8a2372e79be63b32164255dcf7 | refs/heads/master | 2023-07-18T07:12:47.357536 | 2021-09-04T23:48:48 | 2021-09-04T23:48:48 | 403,177,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | import pygame
def main():
    """Open a 480x480 window and redraw a red circle every frame until the
    window's close button is clicked."""
    pygame.init()                       # prepare the pygame module for use
    side = 480                          # square window, in pixels
    frame_clock = pygame.time.Clock()   # used to cap the frame rate
    surface = pygame.display.set_mode((side, side))
    circle_rgb = (255, 0, 0)            # (red, green, blue)
    background_rgb = (0, 200, 255)

    running = True
    while running:
        event = pygame.event.poll()     # look for any pending event
        if event.type == pygame.QUIT:   # window close button clicked?
            running = False             # leave the game loop
        else:
            # Everything is redrawn from scratch each frame: clear to the
            # background colour, draw the circle, then present the frame.
            surface.fill(background_rgb)
            pygame.draw.circle(surface, circle_rgb, (50, 100), 20)
            pygame.display.flip()
            frame_clock.tick(60)        # cap at 60 frames per second

    pygame.quit()                       # close the window on exit
main() | [
"86579691+huynhkGW@users.noreply.github.com"
] | 86579691+huynhkGW@users.noreply.github.com |
cb444cb6f5d22f2864c28385b794f079f161b9db | 3b2a97eee186ae1e660fd179b412192479c832a0 | /mainapp/urls.py | e6f7fb43551480d1271fc5f53c9c04227c1b95e5 | [] | no_license | vips21/practice12 | 77336bca1b0be140d7be6634d5c2fb3a23fdb3f7 | 13080255457feb22b90bfcc8a83a45af7b618f58 | refs/heads/main | 2023-02-17T03:32:14.617248 | 2021-01-20T13:07:36 | 2021-01-20T13:07:36 | 331,302,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | """mainapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.contrib.staticfiles.urls import static
# URL routing table: Django admin plus the `account` app mounted at the root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('account.urls'))
]
# Append static/media file routes (development-style serving; presumably a
# no-op outside DEBUG — verify against the deployment setup).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"vprajapati@codal.com"
] | vprajapati@codal.com |
7da6315fe3bd2a36e17fe506d8ab3e19127e2276 | adb294da5d223731ab42430a9b47d730a0afa709 | /client.py | 9b351b07fc6efa1098d8c85bca8345932c827004 | [] | no_license | marianisar/denial-protection | 95c50f1ce39cc3c1f6f895258f43348a9efc0877 | 62fc6f882df5da44b32cd44ae303b87616da5a25 | refs/heads/master | 2020-07-29T07:04:38.946416 | 2019-09-20T05:22:48 | 2019-09-20T05:22:48 | 209,708,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | #############################################################################
# #
# Copyright 2019 MARIA NISAR. #
# All Rights Reserved. #
# #
# #
#############################################################################
'''
Created on SEP 16, 2019
@author: Maria Nisar
'''
import requests
import sys
if __name__=="__main__":
    # Number of simulated HTTP clients, forwarded as form data to the
    # local load-generating server.
    user_id = input('Enter the number of HTTP clients to simulate :')
    data = {'user_id': user_id}
    url = 'http://localhost:8080'
    req = requests.post(url, data=data)
    # Bug fix: on Python 3 `req.content` is bytes, so `content + '\n'`
    # raised TypeError. `req.text` is the decoded str body.
    sys.stdout.write(req.text + '\n')
| [
"noreply@github.com"
] | marianisar.noreply@github.com |
5d92375b837aad65b7fc6cd3e6d5990c37aa2cac | 816111c67ebf078c79e31a8a1269189ee1768829 | /instrument爬虫/updata_excel.py | c4141a0b96360239dfc7f3c6b3971433c40630df | [] | no_license | ZZF98/PythonScript | 1837c22fd5acdf6c102e643ed3fb852f7517dea8 | 3b6c0730f77b894e183fe2388b608a1f134ed7c4 | refs/heads/master | 2021-07-25T06:13:17.598506 | 2020-07-10T07:42:51 | 2020-07-10T07:42:51 | 197,695,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | import xlrd
from xlutils.copy import copy
if __name__ == '__main__':
# 读取
wb = xlrd.open_workbook('instrument.xlsx')
sheetNameArr = wb.sheet_names()
sheet = wb.sheet_by_name(sheetNameArr[0])
# copy,获取sheet
workbooknew = copy(wb)
ws = workbooknew.get_sheet(0)
print(sheet.nrows, sheet.ncols)
col0 = ''
col1 = ''
col2 = ''
row_r = 0
for row in range(sheet.nrows):
print(row_r, sheet.cell(row, 0).value, sheet.cell(row, 1).value, sheet.cell(row, 2).value)
if col0 == '':
col0 = sheet.cell(row, 0).value
if col1 == '':
col1 = sheet.cell(row, 1).value
if col2 == '':
col2 = sheet.cell(row, 2).value
if row_r != 0:
if col0 == sheet.cell(row, 0).value:
ws.write(row, 0, '')
else:
col0 = sheet.cell(row, 0).value
if col1 == sheet.cell(row, 1).value:
ws.write(row, 1, '')
else:
col1 = sheet.cell(row, 1).value
if col2 == sheet.cell(row, 2).value:
ws.write(row, 2, '')
else:
col2 = sheet.cell(row, 2).value
row_r = row_r + 1
# print(sheet.cell(row, 1).value)
# print(sheet.cell(row, 2).value)
workbooknew.save(u'instrument_copy.xlsx')
| [
"zhangzefeng98@QQ.com"
] | zhangzefeng98@QQ.com |
50a63199a21d529b0c43e3e0e4d4d29a57cb9eb4 | b303d8d9f8866237cd869babcd690331d8e7e104 | /commandLine/blockChain.py | bdee21036f286273d4dbb9309aabc2ef3eec3db5 | [
"MIT"
] | permissive | Rasesh2005/BlockChain-Model-Python | d833bf6bd4a8e607488448753a393b7117f69b59 | a4299afc7a0ae900ecac2f776fdd52973c0e467d | refs/heads/master | 2023-02-25T14:04:26.997701 | 2021-02-09T19:35:55 | 2021-02-09T19:35:55 | 335,051,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,749 | py | from block import Block
from transaction import Transaction
class BlockChain:
"""
A class for Making A Block In A Blockchain
...
Attributes
----------
pendingTransactions : list
A variable storing all the pending transactions to be executed by next block
chain : list
A list Representing the Blockchain containing Genesis Block(first block without transaction)
difficulty : int
The difficulty of mining a block
refer Block.mineBlock function for detail
miningReward : int
The Number of coins a miner gets as rewrd for mining a block
Methods
-------
createGenesisBlock():
returns A Genesis Block
getLatestBlock():
returns The Last Block in blockchain
minePendingTransactions():
fills a new block with all pending transactions,
mines it and then adds it to bockchain.
addTransaction(transaction):
adds new transaction in pendingTransactions list
getBalanceOfAddress(address):
returns balance of user (address)
isChainValid():
checks for data tamper in Blockchain,
returns true if valid and false if some data is tampered
"""
def __init__(self) -> None:
self.pendingTransactions=[]
self.chain=[self.createGenesisBlock()]
self.difficulty=3
# Miners Get coins as reward on mining a new block...
self.miningReward=100
def createGenesisBlock(self)->Block:
"""
creates A Genesis Block and returns it
"""
return Block([self.pendingTransactions],previousHash="0000")
def getLatestBlock(self)->Block:
"""
returns The Last Block in blockchain
"""
return self.chain[-1]
def minePendingTransactions(self,miningRewardAddress)->None:
"""
fills a new block with all pending transactions,
mines it and then adds it to bockchain.
Parameter
---------
miningRewardAddress : PublicKey
the public key of the user's bitcoin wallet
"""
block=Block(transactions=self.pendingTransactions,previousHash=self.getLatestBlock().hash)
block.mineBlock(self.difficulty,miningRewardAddress)
print(f"Block Successfully mined")
self.chain.append(block)
# adding the miningreward to the user who mined current block
self.pendingTransactions=[
Transaction(None,miningRewardAddress,self.miningReward)
]
def addTransaction(self,transaction:Transaction,public_key)->None:
"""
adds new transaction in pendingTransactions list
"""
if not transaction.sentFrom or not transaction.sentTo:
raise Exception("Transaction Must Contain From and To Address");
if not transaction.isValid(public_key):
raise Exception("Cannot add invalid transaction to chain")
self.pendingTransactions.append(transaction)
def getBalanceOfAddress(self,address:str)->int:
"""
returns balance of user (address)
Parameter
---------
address : PublicKey
the public key of the user's bitcoin wallet whose balance is to be calculated
"""
balance=0
for block in self.chain:
for t in block.transactions:
if t.sentFrom==address:
balance-=t.amount
if t.sentTo==address:
balance+=t.amount
return balance
# Adding A Block Without Reward
# Not in use
def addBlock(self,newBlock:Block)->None:
newBlock.previousHash=self.getLatestBlock().hash
newBlock.mineBlock(self.difficulty)
self.chain.append(newBlock)
def isChainValid(self,public_key)->bool:
"""
checks for data tamper in Blockchain,
returns true if valid and false if some data is tampered
Parameter
---------
public_key : PublicKey
the public key of the user's bitcoin wallet
"""
for i in range(1,len(self.chain)):
currentBlock=self.chain[i]
prevBlock=self.chain[i-1]
if not currentBlock.hasValidTransactions(public_key=public_key):
return False
# Checks for change in data in current block
if currentBlock.hash!=currentBlock.generateHash():
return False
# Checks for broken link if the hash has been regenerated
if currentBlock.previousHash!=prevBlock.hash:
return False
return True
def __repr__(self) -> str:
return str({"BlockChain":{i:item for i,item in enumerate(self.chain)}}) | [
"rasesh.udayshetty@gmail.com"
] | rasesh.udayshetty@gmail.com |
c9925053e9d2f58580904a10e702cdfefea098dd | 8e59a43de9d427865c5d67fef39e9a50e44f07ce | /ppstructure/table/tablepyxl/tablepyxl.py | ba3cc0fc499fccd93ffe3993a99296bc6603ed8a | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleOCR | af87db8a804b9a4f4eac8a0b2faf80d1dd71633a | 15963b0d242867a4cc4d76445626dc8965509b2f | refs/heads/release/2.7 | 2023-09-01T04:53:37.561932 | 2023-08-30T02:22:15 | 2023-08-30T02:22:15 | 262,296,122 | 34,195 | 7,338 | Apache-2.0 | 2023-09-14T06:08:11 | 2020-05-08T10:38:16 | Python | UTF-8 | Python | false | false | 4,100 | py | # Do imports like python3 so our package works for 2 and 3
from __future__ import absolute_import
from lxml import html
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
from premailer import Premailer
from tablepyxl.style import Table
def string_to_int(s):
if s.isdigit():
return int(s)
return 0
def get_Tables(doc):
tree = html.fromstring(doc)
comments = tree.xpath('//comment()')
for comment in comments:
comment.drop_tag()
return [Table(table) for table in tree.xpath('//table')]
def write_rows(worksheet, elem, row, column=1):
"""
Writes every tr child element of elem to a row in the worksheet
returns the next row after all rows are written
"""
from openpyxl.cell.cell import MergedCell
initial_column = column
for table_row in elem.rows:
for table_cell in table_row.cells:
cell = worksheet.cell(row=row, column=column)
while isinstance(cell, MergedCell):
column += 1
cell = worksheet.cell(row=row, column=column)
colspan = string_to_int(table_cell.element.get("colspan", "1"))
rowspan = string_to_int(table_cell.element.get("rowspan", "1"))
if rowspan > 1 or colspan > 1:
worksheet.merge_cells(start_row=row, start_column=column,
end_row=row + rowspan - 1, end_column=column + colspan - 1)
cell.value = table_cell.value
table_cell.format(cell)
min_width = table_cell.get_dimension('min-width')
max_width = table_cell.get_dimension('max-width')
if colspan == 1:
# Initially, when iterating for the first time through the loop, the width of all the cells is None.
# As we start filling in contents, the initial width of the cell (which can be retrieved by:
# worksheet.column_dimensions[get_column_letter(column)].width) is equal to the width of the previous
# cell in the same column (i.e. width of A2 = width of A1)
width = max(worksheet.column_dimensions[get_column_letter(column)].width or 0, len(table_cell.value) + 2)
if max_width and width > max_width:
width = max_width
elif min_width and width < min_width:
width = min_width
worksheet.column_dimensions[get_column_letter(column)].width = width
column += colspan
row += 1
column = initial_column
return row
def table_to_sheet(table, wb):
"""
Takes a table and workbook and writes the table to a new sheet.
The sheet title will be the same as the table attribute name.
"""
ws = wb.create_sheet(title=table.element.get('name'))
insert_table(table, ws, 1, 1)
def document_to_workbook(doc, wb=None, base_url=None):
"""
Takes a string representation of an html document and writes one sheet for
every table in the document.
The workbook is returned
"""
if not wb:
wb = Workbook()
wb.remove(wb.active)
inline_styles_doc = Premailer(doc, base_url=base_url, remove_classes=False).transform()
tables = get_Tables(inline_styles_doc)
for table in tables:
table_to_sheet(table, wb)
return wb
def document_to_xl(doc, filename, base_url=None):
"""
Takes a string representation of an html document and writes one sheet for
every table in the document. The workbook is written out to a file called filename
"""
wb = document_to_workbook(doc, base_url=base_url)
wb.save(filename)
def insert_table(table, worksheet, column, row):
if table.head:
row = write_rows(worksheet, table.head, row, column)
if table.body:
row = write_rows(worksheet, table.body, row, column)
def insert_table_at_cell(table, cell):
"""
Inserts a table at the location of an openpyxl Cell object.
"""
ws = cell.parent
column, row = cell.column, cell.row
insert_table(table, ws, column, row) | [
"zjwenmu@gmail.com"
] | zjwenmu@gmail.com |
7c6646d07318d4d90843019252fd6669ee456fc9 | 0bc794345a18c312f1a937c33c78bf215f38aa86 | /resnet50-mushrooms/pods/__init__.py | 0b6c274fbccd56610d3d019d4bd0761fef73b363 | [
"Apache-2.0"
] | permissive | leonwanghui/mindspore-jina-apps | a1ea220461e7e38da2b21e18988c844282ae6afb | e2912d9a93689c69005345758e3b7a2f8ba6133e | refs/heads/main | 2023-01-22T11:57:15.698571 | 2020-11-25T04:48:34 | 2020-11-25T04:48:34 | 313,174,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | import numpy as np
from jina.executors.encoders.frameworks import BaseMindsporeEncoder
class MindsporeResNet50(BaseMindsporeEncoder):
"""
:class:`MindsporeResNet50` Encoding image into vectors using mindspore.
"""
def encode(self, data, *args, **kwargs):
from mindspore import Tensor
data = np.pad(data.reshape([-1, 3, 224, 224]),
[(0, 0), (0, 0), (0, 4), (0, 4)]).astype('float32')
return self.model(Tensor(data)).asnumpy()
def get_cell(self):
from .resnet.src.resnet import ResNet50
class ResNet50Embed(ResNet50):
def construct(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
c1 = self.maxpool(x)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
out = self.mean(c5, (2, 3))
out = self.flatten(out)
return out
return ResNet50Embed()
| [
"wanghui71leon@gmail.com"
] | wanghui71leon@gmail.com |
68d63a1d6f76afc3108f0986e47f3d49dfec9e21 | af728a099aa77fe8e6a94ef33f46a80adcdd7d3b | /questao5_lista3_media.py | 7c90f2fa357f27ba5ac9c3b85f74794ab00ba533 | [] | no_license | pedrohenriqueads/progr1ads | 9a7fe9bab0a9b9c39230c09771dae6674f620240 | 44a1f87041fd50208775ab959886b527142eb0ff | refs/heads/master | 2021-02-19T05:58:01.304798 | 2020-05-25T00:24:33 | 2020-05-25T00:24:33 | 245,282,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | #5 - Faça um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e apresentar:
# - Print "Aprovado" (approved) when the average is at least seven;
# - "Reprovado" (failed) when it is below seven;
# - "Aprovado com Distinção" (approved with distinction) when it is exactly ten.
nota1 = float(input("Digite a primeira nota: "))
nota2 = float(input("Digite a segunda nota: "))
soma = (nota1 + nota2) /2
# NOTE(review): inputs are not range-checked, so an average above 10 is
# possible; it falls into "Reprovado" here and "Ops..." below — confirm intent.
if soma >= 7 and soma < 10:
    print("Aprovado")
elif soma == 10:
    print("Aprovado com Distinção")
else:
    print("Reprovado")
if soma <= 10:
    # Optionally show the computed average (1 = yes, 2 = no).
    media = int(input("Deseja ver a média? (1)-Sim (2)-Não: "))
    if media == 1:
        print("Sua média é:", soma)
    elif media == 2:
        print("programa encerrado")
    else:
        print("Error")
else:
    print("Ops, operação inválida")
| [
"pedrosilva@ads.fiponline.edu.br"
] | pedrosilva@ads.fiponline.edu.br |
d444067745e1329023ab1633378114431728b1dc | eb65d7c79561ec855c36909f91dee8fe83e05b63 | /sendsignal.py | e19473224d25296de1c4220daf13176c158d5446 | [] | no_license | ycarissan/sendsignal | d2058f07acc4d0c8a766057143b2163820bba651 | ff9e8a1e53908e52bd274f201bcafd6effaf807e | refs/heads/master | 2020-06-19T18:23:35.838932 | 2019-07-14T10:04:43 | 2019-07-14T10:04:43 | 196,820,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import RPi.GPIO as GPIO
import time
PIN = 13                                        # BOARD-numbered pin driven by send()
SIG1 = "1011011001001011001001001001001001011"  # bit pattern transmitted by send()

def setup():
    """Configure PIN as an output (BOARD numbering) and drive it low (idle)."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(PIN, GPIO.OUT)
    GPIO.output(PIN, GPIO.LOW)
def send(msg):
    """Bit-bang *msg* (a string of '0'/'1' characters) on PIN, repeated 10 times.

    Each character holds the pin low ('0') or high ('1') for ~300 microseconds;
    any other character leaves the pin unchanged for one bit slot. After each
    full repetition the pin is driven low for ~11 ms as an inter-frame gap.
    Timing relies on time.sleep(), so actual pulse widths are best-effort.
    """
    for i in range(10):
        for s in msg:
            # print(s)
            if s=="0":
                GPIO.output(PIN, GPIO.LOW)
            elif s=="1":
                GPIO.output(PIN, GPIO.HIGH)
            time.sleep(0.000300)
        GPIO.output(PIN, GPIO.LOW)
        time.sleep(0.011000)
def main():
    """Send SIG1 once, then idle forever (interrupt with Ctrl-C to stop)."""
    print('debut')
    setup()
    CMD=1
    while (1):
        if (CMD == 1):
            send(SIG1)
            CMD=0           # after the first transmission we only sleep below
        elif (CMD==0):
            time.sleep(1)   # idle; nothing ever sets CMD back to 1
        else:
            print('signal inconnu')

main()
| [
"ycarissan@gmail.com"
] | ycarissan@gmail.com |
d5c5c47e59e9a5bc56b001df5aa50bcd31f4ad79 | ab8117bc5b5040e5107fc59337fabc966cb062ba | /.history/twitter/engine_20200328094821.py | ee9b4bd9c70c405259d659015ba54699abc23b6b | [] | no_license | mirfarzam/DownloaderBro | 6019ab561c67a397135d0a1585d01d4c6f467df4 | 8e0a87dd1f768cfd22d24a7f8c223ce968e9ecb6 | refs/heads/master | 2022-04-16T15:31:38.551870 | 2020-04-15T17:36:26 | 2020-04-15T17:36:26 | 255,090,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py |
import tweepy
import datetime
import configparser
import time
# Read the four OAuth tokens from credential.conf ([API] and [ACCESS] sections)
# and build an authenticated tweepy client used by the polling loop below.
config = configparser.ConfigParser()
config.read('credential.conf')
consumer_key = config['API']["API_key"]
consumer_secret = config['API']["API_secret_key"]
access_token = config['ACCESS']["Access_token"]
# NOTE(review): "Access_token_secert" is misspelled but must match the key
# actually present in credential.conf — fix both together, or neither.
access_token_secret = config['ACCESS']["Access_token_secert"]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# api.verify_credentials()
def check_mentions(api, keywords, since_id):
    """Scan mentions newer than *since_id*; for each mention that replies to
    another tweet, print the video variants of the replied-to tweet.

    :param api: authenticated tweepy API client
    :param keywords: currently unused (kept for interface compatibility)
    :param since_id: high-water mark of already-processed mention ids
    :return: the highest mention id seen (new high-water mark)
    """
    new_since_id = since_id
    for tweet in tweepy.Cursor(api.mentions_timeline,
                               since_id=since_id).items():
        new_since_id = max(tweet.id, new_since_id)
        if tweet.in_reply_to_status_id is None:
            continue
        # The tweet this mention replies to (presumably the one holding the
        # video; statuses_lookup was removed in tweepy 4.x — verify version).
        main = (api.statuses_lookup([tweet.in_reply_to_status_id], include_entities=True))[0]
        try:
            if 'media' in main.extended_entities:
                for video in main.extended_entities['media'][0]['video_info']['variants']:
                    try:
                        print(f"{video['bitrate']} and is {video['url']}")
                    except KeyError:
                        # Variant without a bitrate/url (e.g. m3u8 playlists).
                        print(f"Error in finding video in tweet id : {main.id}")
        except (AttributeError, KeyError):
            # No extended_entities attribute, or no video_info in the media.
            # (Narrowed from bare `except:` so Ctrl-C is not swallowed.)
            print(f"Cannot get Tweet video and tweet id is : {main.id}")
    return new_since_id
# Poll the mentions timeline forever, advancing the high-water mark each pass.
since_id = 1
while True:
    since_id = check_mentions(api, ["help", "support"], since_id)
time.sleep(5) | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
941ce41828fd22ce8c301d1e665d3e4c93720016 | 7a2dc7f1c9b75f06cdff21170620176a54cb766e | /ClassFIT_AI/Feature_extract/validator.py | e3cbbe8a17734613475bca2281ba2d8341d944ca | [] | no_license | kokong1231/KSJ_project | 2f18c2f9dad6a5d044fdfec46c58c570fff6ce3f | 99dea21e9b674087962e773eab58b842ffc14d6e | refs/heads/master | 2023-02-03T22:43:36.334673 | 2020-12-22T11:50:53 | 2020-12-22T11:50:53 | 323,610,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from w3c_validator import validate
def valid_error_count(url_):
    """Return 1 when the page at *url_* validates badly, else 0.

    "Badly" means the W3C validator reports 10 or more messages, or the
    validator call itself fails (network error, malformed URL, ...).
    """
    try:
        messages = validate(url_)['messages']
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any validator failure counts as invalid.
        return 1
    return 1 if len(messages) >= 10 else 0
| [
"kokong1231@gmail.com"
] | kokong1231@gmail.com |
6c6c348d6824f60b9373434921e7172901d8c8ce | c2f85e6867960a98c72420b8fc79547d67ce4e6b | /Desktop/project-samaritan1011001-master/osna/mytwitter.py | 294a04024acd498dd8e49f3b8d683135b188dc29 | [] | no_license | sahanashreedhar27/ClassificationOfTwitters | f9f0ca96435ca937d66b2391a9bea27e518e8510 | 7db8cfb3a8c8280db45a9122ff6461732884965b | refs/heads/master | 2021-01-16T12:39:27.254198 | 2020-02-26T01:59:58 | 2020-02-26T01:59:58 | 243,125,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,692 | py | """
Wrapper for Twitter API.
"""
import os
from itertools import cycle
import sys
import time
import pandas as pd # Data manipulation
import re, json
import tweepy as tp # API to interact with twitter
from collections import defaultdict
# Twitter error codes treated as rate-limit signals (88, 130, 420, 429).
# NOTE(review): not referenced anywhere in the visible code — confirm external
# callers still need it before removing.
RATE_LIMIT_CODES = set([88, 130, 420, 429])
class Twitter:
def __init__(self, credential_file, directory=''):
    """
    Params: credential_file...path to a file with one JSON object per line,
            each containing the four required tokens: consumer_key,
            consumer_secret, access_token, token_secret
            directory.........base output directory for the *_table_out.json files
    """
    # Use a context manager so the credentials file handle is closed
    # promptly (the original `open()` leaked it).
    with open(credential_file) as fh:
        self.credentials = [json.loads(l) for l in fh]
    # Round-robin cycler so the API key can be rotated when rate-limited.
    self.credential_cycler = cycle(self.credentials)
    self.reinit_api()
    self.directory = directory
def reinit_api(self):
    # Rotate to the next credential set and rebuild the tweepy client.
    # NOTE(review): wait_on_rate_limit_notify and compression were removed in
    # tweepy 4.x — this presumably targets tweepy 3.x; verify the pinned version.
    auth = self.authenticate_twitter_app()
    self.twapi = tp.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
def authenticate_twitter_app(self):
    """Advance to the next stored credential set and return a configured
    tweepy OAuth handler for it."""
    creds = next(self.credential_cycler)
    # Announce the rotation on stderr so it doesn't pollute captured stdout.
    sys.stderr.write('switching creds to %s\n' % creds['consumer_key'])
    handler = tp.OAuthHandler(creds['consumer_key'], creds['consumer_secret'])
    handler.set_access_token(creds['access_token'], creds['token_secret'])
    return handler
def get_user_timeline_tweets(self, twitter_client, user_list, num_tweets):
    '''
    Uses Tweepy's cursor method to fetch each user's timeline tweets.
    Failures are best-effort: a rate limit sleeps 15 minutes, any other
    error skips the current user.
    :param twitter_client: the twitter client to use.
    :param user_list: List of users to collect data from.
    :param num_tweets: Number of tweets from user to collect
    :return: List of tweets (tweepy Status objects)
    '''
    tweets = []
    for user in user_list:
        print(f'Getting {num_tweets} tweets for {user}. ', end='')
        try:
            for tweet in tp.Cursor(twitter_client.user_timeline, id=user).items(num_tweets):
                tweets.append(tweet)
        except tp.RateLimitError:
            # NOTE(review): tp.RateLimitError exists in tweepy 3.x only;
            # verify the pinned tweepy version.
            print(f'SLEEPING DUE TO RATE LIMIT ERROR!!!!')
            time.sleep(15 * 60)
        except Exception as e:
            # Best-effort: report and move on to the next user.
            print(f'SOME ERROR OCCURRED...PASSING!!!')
            print(e.__doc__)
    return tweets
# Helper function to get a fixed number of tweets per user and tabulate them.
def get_tweets(self, twitter_client, v_user_list, num_tweets):
    '''
    Fetch tweets from each user's timeline and return dataframes.
    :param twitter_client: the twitter client to use.
    :param v_user_list: List of users to collect data from.
    :param num_tweets: Number of tweets from user to collect
    :return: tuple (tweet_df, user_df) of pandas DataFrames — one row per
             tweet and one row per distinct author respectively.
    '''
    statuses = self.get_user_timeline_tweets(twitter_client, v_user_list, num_tweets)
    # Flatten the Status objects into list-of-dict tables.
    tweet_LoD, user_LoD = self.produce_status_LoDs(statuses)
    tweet_df = pd.DataFrame(tweet_LoD)
    user_df = pd.DataFrame(user_LoD)
    return (tweet_df, user_df)
def produce_status_LoDs(self, statuses):
    '''
    Flatten tweepy Status objects into list-of-dict tables.
    Args:
        statuses: iterable of tweepy Status objects
    Returns:
        (tweet_LoD, user_LoD): one dict per tweet, plus one dict per
        distinct author (deduplicated by screen name). Every user row is
        labelled 'known_bot' = False.
    '''
    # Values treated as "empty" for the bio/location text fields.
    # (The original list also contained pd.np.nan, which never matches a
    # string and whose pd.np alias was removed in pandas 2.0.)
    empty_sentinels = ['NULL', 'NaN', '', ' ']
    tweet_LoD = []
    user_LoD = []
    seen_screen_names = set()
    for status in statuses:
        author = status.author
        tweet_dict = {
            'user_id': author.id,
            'user_screen_name': author.screen_name,
            'created_at': str(status.created_at),
            'id': status.id,
            'id_str': status.id_str,
            'text': status.text,
            'source': status.source,
            'truncated': status.truncated,
            'retweet_count': status.retweet_count,
            'favorite_count': status.favorite_count,
            'lang': status.lang,
            # Heuristic: any text containing "RT" is counted as a retweet.
            'is_tweet': re.search('RT', status.text) is None,
        }
        tweet_LoD.append(tweet_dict)
        # One user row per distinct author. (The original deduplicated on an
        # 'id' key that was never stored in user_dict, so every status
        # appended a duplicate row.)
        if author.screen_name in seen_screen_names:
            continue
        seen_screen_names.add(author.screen_name)
        user_dict = {
            'name': author.name,
            'screen_name': author.screen_name,
            'description': author.description,
            'followers_count': author.followers_count,
            'location': author.location,
            'friends_count': author.friends_count,
            'listed_count': author.listed_count,
            'favourites_count': author.favourites_count,
            'statuses_count': author.statuses_count,
        }
        user_dict['has_bio'] = bool(user_dict['description'] not in empty_sentinels)
        user_dict['followers_count_gr_30'] = bool(user_dict['followers_count'] >= 30)
        user_dict['followers_2_times_ge_friends'] = bool(
            2 * user_dict['followers_count'] >= user_dict['friends_count'])
        user_dict['bot_in_biography'] = bool(
            isinstance(user_dict['description'], str) and 'bot' in user_dict['description'].lower())
        # "around 100": friends/followers ratio inside [80, 120]. The original
        # chained comparison `80.0 <= r >= 120.0` effectively tested r >= 120.
        user_dict['ratio_friends_followers_around_100'] = bool(
            user_dict['followers_count'] > 0 and
            80.0 <= float(user_dict['friends_count']) / user_dict['followers_count'] <= 120.0)
        user_dict['no_location'] = bool(user_dict['location'] in empty_sentinels)
        # NOTE(review): the original set known_bot = False on BOTH branches of
        # `if status.author.verified:` — collected users are uniformly labelled
        # non-bot. Preserved; confirm whether verified users were meant to
        # differ.
        user_dict['known_bot'] = False
        user_LoD.append(user_dict)
    print(f'number of tweets: {len(tweet_LoD)} and number of users: {len(user_LoD)} collected.')
    return (tweet_LoD, user_LoD)
def produce_bot_LoDs(self, bots, for_bots=False):
    '''
    Normalize a raw users dataframe (cresci-style users.csv) into the
    engineered feature columns used by the pipeline.
    Args:
        bots........pandas DataFrame of user rows (mutated in place)
        for_bots....when False, only the first 740 rows are returned so the
                    genuine-user sample matches the bot sample size
    Returns:
        DataFrame restricted to user_colnames, with 'known_bot' = True.
    '''
    user_colnames = ['name', 'screen_name', 'description', 'location',
                     'friends_count', 'favourites_count', 'followers_count', 'listed_count',
                     'statuses_count', 'has_bio', 'followers_count_gr_30', 'bot_in_biography',
                     'no_location', 'followers_2_times_ge_friends', 'ratio_friends_followers_around_100',
                     'known_bot']
    # Values treated as "empty" for the bio/location text fields.
    empty_sentinels = ['NULL', 'NaN', '', ' ']
    # NOTE: the original lambdas returned the INVERSE of each flag (e.g.
    # has_bio was False when a bio existed), inconsistent with
    # produce_status_LoDs; they are fixed here so both sources agree.
    # pd.notna/pd.isna replace the broken pd.np.nan membership test
    # (pd.np was removed in pandas 2.0; NaN never matches `in` anyway).
    bots['has_bio'] = bots['description'].apply(
        lambda x: bool(pd.notna(x) and str(x) not in empty_sentinels))
    bots['followers_count_gr_30'] = bots['followers_count'].apply(
        lambda x: bool(int(x) >= 30))
    # isinstance guard: a NaN description would crash on .lower() otherwise.
    bots['bot_in_biography'] = bots['description'].apply(
        lambda x: bool(isinstance(x, str) and 'bot' in x.lower()))
    bots['no_location'] = bots['location'].apply(
        lambda x: bool(pd.isna(x) or x in empty_sentinels))
    bots['followers_2_times_ge_friends'] = bots.apply(
        lambda row: bool(2 * row['followers_count'] >= row['friends_count']), axis=1)
    # "around 100": ratio inside [80, 120] (original chained comparison
    # `80.0 <= r >= 120.0` effectively tested r >= 120).
    bots['ratio_friends_followers_around_100'] = bots.apply(
        lambda row: bool(row['followers_count'] > 0 and
                         80.0 <= float(row['friends_count']) / row['followers_count'] <= 120.0),
        axis=1)
    bots['known_bot'] = True
    bots_output = bots[user_colnames]
    if for_bots:
        return bots_output
    # Cap the sample so genuine users match the bot-sample size.
    return bots_output.head(740)
def fetch_bot_dataset_and_store(self):
    """
    Loads the local social-spambot dataset at
    project-samaritan1011001/osna/data/social_spambots_1.csv/users.csv and
    normalizes it via produce_bot_LoDs (known_bot = True, all rows kept).
    :return: Nothing. Creates a file called b_user_table_out.json
    """
    bots = pd.read_csv("project-samaritan1011001/osna/data/social_spambots_1.csv/users.csv")
    bots_Clean = self.produce_bot_LoDs(bots, for_bots=True)
    print(f'Number of bots collected {len(bots_Clean)}')
    user_json = bots_Clean.to_json(orient='records')
    with open(self.directory + os.path.sep + 'b_user_table_out.json', 'w') as outfile:
        json.dump(user_json, outfile)
def fetch_genuine_dataset_and_store(self):
    """
    Loads the local genuine-accounts dataset at
    project-samaritan1011001/osna/data/genuine_accounts.csv/users.csv,
    normalizes it via produce_bot_LoDs (capped at 740 rows).
    NOTE(review): produce_bot_LoDs labels every row known_bot = True, so
    these GENUINE users are stored with a bot label — confirm this is the
    intended labelling before training on the merged output.
    :return: Nothing. Creates a file called g_user_table_out.json
    """
    g_users = pd.read_csv("project-samaritan1011001/osna/data/genuine_accounts.csv/users.csv")
    g_users_Clean = self.produce_bot_LoDs(g_users)
    print(f'Number of g users {len(g_users_Clean)}')
    user_json = g_users_Clean.to_json(orient='records')
    with open(self.directory + os.path.sep + 'g_user_table_out.json', 'w') as outfile:
        json.dump(user_json, outfile)
def fetch_v_user_and_store(self, v_user_list, num_tweets):
    """
    Collects verified-user data from the Twitter API and tabulates it.
    :param v_user_list: screen names / ids of verified users to fetch
    :param num_tweets: tweets to pull per user
    :return: Nothing. Writes v_tweet_table_out.json and v_user_table_out.json
             under self.directory.
    """
    # Get verified users, write them to disk.
    v_tweet_df, v_user_df = self.get_tweets(self.twapi, v_user_list, num_tweets)
    print("Number of verified users COLLECTED: {}".format(len(v_user_df)))
    user_json = v_user_df.to_json(orient='records')
    tweet_json = v_tweet_df.to_json(orient='records')
    with open(self.directory + os.path.sep + 'v_tweet_table_out.json', 'w') as outfile:
        json.dump(tweet_json, outfile)
    with open(self.directory + os.path.sep + 'v_user_table_out.json', 'w') as outfile:
        json.dump(user_json, outfile)
def fetch_nv_user_and_store(self, nv_user_list, num_tweets):
    """
    Collects unverified-user data from the Twitter API and tabulates it.
    :param nv_user_list: screen names / ids of unverified users to fetch
    :param num_tweets: tweets to pull per user
    :return: Nothing. Writes nv_tweet_table_out.json and nv_user_table_out.json
             under self.directory.
    """
    # Get unverified users, write them to disk.
    nv_tweet_df, nv_user_df = self.get_tweets(self.twapi, nv_user_list, num_tweets)
    print("Number of unverified users COLLECTED: {}".format(len(nv_user_df)))
    user_json = nv_user_df.to_json(orient='records')
    tweet_json = nv_tweet_df.to_json(orient='records')
    with open(self.directory + os.path.sep + 'nv_tweet_table_out.json', 'w') as outfile:
        json.dump(tweet_json, outfile)
    with open(self.directory + os.path.sep + 'nv_user_table_out.json', 'w') as outfile:
        json.dump(user_json, outfile)
def merge_bot_user_datasets(self):
    """
    Merges the previously written user tables (verified, unverified, bot,
    genuine) into one json.
    :return: Nothing. Creates a file called final_user_master.json
    """
    # Re-load every intermediate table written by the fetch_* methods.
    with open(self.directory + os.path.sep + 'v_tweet_table_out.json') as json_file:
        v_tweet_json = json.load(json_file)
    with open(self.directory + os.path.sep + 'v_user_table_out.json') as json_file:
        v_user_json = json.load(json_file)
    with open(self.directory + os.path.sep + 'nv_tweet_table_out.json') as json_file:
        nv_tweet_json = json.load(json_file)
    with open(self.directory + os.path.sep + 'nv_user_table_out.json') as json_file:
        nv_user_json = json.load(json_file)
    with open(self.directory + os.path.sep + 'g_user_table_out.json') as json_file:
        g_user_json = json.load(json_file)
    v_tweet_df = pd.read_json(v_tweet_json)
    v_user_df = pd.read_json(v_user_json)
    nv_tweet_df = pd.read_json(nv_tweet_json)
    nv_user_df = pd.read_json(nv_user_json)
    g_user_df = pd.read_json(g_user_json)
    # Merging v_users and nv_users.
    # NOTE(review): DataFrame.append was removed in pandas 2.0 (use
    # pd.concat); this code presumably targets pandas < 2 — verify pin.
    user_df = nv_user_df.append(v_user_df, sort=False)  # , ignore_index=True)
    # NOTE(review): tweet_df is built but never used or written out.
    tweet_df = nv_tweet_df.append(v_tweet_df)  # , ignore_index=True)
    with open(self.directory + os.path.sep + 'b_user_table_out.json') as json_file:
        user_json = json.load(json_file)
    bots_Clean = pd.read_json(user_json)
    final_user_df = user_df.append(bots_Clean, sort=False)
    final_user_df = final_user_df.append(g_user_df, sort=False)
    print(f'TOTAL NUMBER OF USERS COLLECTED -> {len(final_user_df)}')
    with open(self.directory + os.path.sep + 'final_user_master.json', 'w') as outfile:
        json.dump(final_user_df.to_json(orient='records'), outfile)
def findCommonNeighbors(self, influential_users):
    '''
    Finds followers shared by ALL of the given influential users.
    Only the first 750 followers of each user are considered.
    :param influential_users: list of user ids / screen names
    :return: (common follower ids, edge list of (influential_user, follower)
             pairs suitable for building a graph)
    '''
    user_followers_dict = defaultdict(list)
    for inf_user in influential_users:
        try:
            for item in tp.Cursor(self.twapi.followers_ids, id=inf_user).items(limit=750):
                user_followers_dict[inf_user].append(item)
        except tp.TweepError as err:
            # The original printed the exception CLASS (tp.TweepError) rather
            # than the caught instance; print the actual error instead.
            print("tweepy.TweepError=", err)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works.
            e = sys.exc_info()[0]
            print("Error: %s" % e)
    ll_neighbors = [x for x in user_followers_dict.values()]
    if not ll_neighbors:
        # Every lookup failed; nothing to intersect.
        return [], []
    # Followers common to every influential user.
    result = set(ll_neighbors[0]).intersection(*ll_neighbors)
    # One edge per (influential user, common follower) pair. (The original
    # also built `final_nodes` and `labels`, which were never used.)
    edges_list_dict = []
    for in_user in influential_users:
        for follower in result:
            edges_list_dict.append((in_user, follower))
    return list(result), edges_list_dict
def fetch_test_data(self, test_list, num_tweets):
'''
Fetchs data for the given test user list
:param test_list: List of users.
:param num_tweets: Number of tweets to fetch for each user.
:return: A josn object with the users' data.
'''
test_df, test_user_df = self.get_tweets(self.twapi, test_list, num_tweets)
tweet_json = test_df.to_json(orient='records')
user_json = test_user_df.to_json(orient='records')
return user_json
# EXTRA FUNCTIONS : IGNORE
# def user_info_for_screen_name(self, screen_name):
# response = self.twapi.get_user(screen_name)
# print(f' user info -> {response.followers_count}')
# return response.followers_count
#
# def followers_count_for_users(self, users_list):
# reach_count = 0
# users = self.twapi.lookup_users(users_list)
# for user in users:
# reach_count += user.followers_count
# return reach_count
#
# def get_all_statuses(self, screen_name):
# statuses = []
# for status in tp.Cursor(self.twapi.user_timeline, screen_name=screen_name, tweet_mode="extended").items():
# statuses.append(status)
# print(f'Total number of statuses retreived -> {len(statuses)}')
# # print(f'A status -> {[x.id for x in statuses if x.retweet_count > 0]}')
# statuses_rt_ge_0 = [x.id for x in statuses if x.retweet_count > 0]
# print(f'Total number of statuses_rt_ge_0 retreived -> {len(statuses_rt_ge_0)}')
#
# statuses_rt_ge_0 = statuses_rt_ge_0[:75]
# return self.get_retweeters_id_for_statuses(statuses_rt_ge_0)
#
# def get_retweeters_id_for_statuses(self, statuses):
# retweeters_ids = []
# reach_count = 0
# for status_id in statuses:
# for retweeters in tp.Cursor(self.twapi.retweeters, id=status_id, tweet_mode="extended").pages():
# print(f'Type -> {type(retweeters)}')
# retweeters_ids.append(retweeters)
# reach_count += self.followers_count_for_users(retweeters)
# # self.user_info_for_screen_name()
# print(f'Total number of retweeters retreived -> {retweeters_ids[0]}')
# print(f'reach_count -> {reach_count}')
# return reach_count
| [
"sahanashreedharkulkarni@dhcp56.merusouth115.iit.edu"
] | sahanashreedharkulkarni@dhcp56.merusouth115.iit.edu |
9855860eeee26a97c487f030f08eba7c367d287f | 4b4828d3c98d76d7bf38f90a015945acc408ddc5 | /PythonAI/Practice/DAY03/src/URL_Parsing_02.py | 0ef5fb5e164d8ee32d3ebfbbf7f7046114d8a105 | [] | no_license | Huh-jae-won/Study | cb5d32728e8dcded492e7edb054b500c91ec607c | e4dbc3fef69bb273b62b866fb5ef2a7250222f10 | refs/heads/main | 2023-06-20T13:06:26.691899 | 2021-07-11T07:43:41 | 2021-07-11T07:43:41 | 362,759,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import sys
import urllib.request as req
import urllib.parse as parse
# 명령줄 매개변수 추출
if len(sys.argv) <= 1 :
print("USAGE : download-forecast-argv <Region Number>")
sys.exit()
regionNumber = sys.argv[1]
# 매개변수를 URL 인코딩
API = "http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp"
values = { 'stnid': regionNumber }
params = parse.urlencode(values)
url = API + "?" + params
print("url=", url) | [
"dfr9034@naver.com"
] | dfr9034@naver.com |
d1c882cd7518d64e6ab5ef5850cee5f124f725c9 | c9523078603d63c219b438e4e990cff83d59a3f3 | /acoustic_sight_server/rpi_cam_client/remote_image_client.py | da5355f88cfb867f88581c1629ec813788581a32 | [
"MIT"
] | permissive | Sitin/acoustic-sight | 7797844ed388d533408de5d2f966ef916fd1395c | 5380fa61eba1f763b017ead7a65ccacdae45c821 | refs/heads/master | 2021-09-05T00:06:29.594204 | 2018-01-22T23:51:06 | 2018-01-22T23:51:06 | 104,977,176 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import abc
import io
from PIL import Image
import requests
from acoustic_sight_server.rpi_cam_client.image_retriever import ImageRetriever
class RemoteImageClient(ImageRetriever):
    """Retriever that downloads the most recent camera frame over HTTP."""

    @abc.abstractmethod
    def get_latest_image_url(self):
        """Return the URL of the newest image; implemented by subclasses."""
        pass

    def get_image(self):
        """Download the latest frame and return it as a PIL Image."""
        payload = requests.get(self.get_latest_image_url()).content
        return Image.open(io.BytesIO(payload))
| [
"mikhail.zyatin@gmail.com"
] | mikhail.zyatin@gmail.com |
f154e1687ad528d8599e0ce4905b373e57a24492 | 38829037b34848d696f475af3978a1b7b1e4cbb4 | /app-cp/import.py | 128118da929a2c8e25da6d68602019e9aa6894d9 | [] | no_license | blackrez/bbl-openresty | 6f259e8503ed73a403103cdae3bd0e9c4d5fa79c | 2ed601cb9e7accc71fcb2d24eac4cff28267177d | refs/heads/master | 2020-04-24T21:06:34.360420 | 2019-02-28T21:06:47 | 2019-02-28T21:06:47 | 172,266,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | import csv
# Load French postal-code records ("laposte_hexasmal") into MySQL.
import mysql.connector
# cnx = mysql.connector.connect(user="root", database="codepostaux")
# Connect to the containerized MySQL host; the database is created below.
cnx = mysql.connector.connect(user="root", host="mysql-codepostaux")
try:
    cursor = cnx.cursor()
except:
    # NOTE(review): on failure this reconnects but never retries cursor(),
    # leaving `cursor` undefined on the next line -- confirm intent.
    cnx.reconnect(attempts=5, delay=5)
create_db = ("CREATE DATABASE IF NOT EXISTS codepostaux;"
             "USE codepostaux;")
# NOTE(review): two SQL statements in a single execute() usually require
# multi=True with mysql.connector -- verify this runs as intended.
cursor.execute(create_db)
# Parameterized insert; the centroid column is built from WKT (SRID 4326).
add_cp = ("INSERT INTO codepostaux"
          "(code_insee, nom_commune, code_postal, libelle, ligne_5, centroid) "
          "VALUES (%s, %s, %s, %s, %s, ST_GeomFromText(%s, 4326))")
with open('/tmp/laposte_hexasmal.csv') as csvfile:
    cpreader = csv.reader(csvfile, delimiter=";")
    fitstline = True
    for row in cpreader:
        print(row)
        if fitstline:
            # Skip the CSV header row.
            fitstline = False
        else:
            if row[5] == '':
                # Missing coordinates: fall back to a null-island point.
                row[5] = "POINT(0 0)"
            else:
                # NOTE(review): stripping the comma with no replacement only
                # yields valid WKT if the source values are already
                # space-separated -- confirm the CSV format.
                row[5] = "POINT({})".format(row[5].replace(',', ""))
            try:
                cursor.execute(add_cp, row)
            except:
                # Best-effort: reconnect and continue; the failed row is dropped.
                cnx.reconnect(attempts=5, delay=5)
cnx.commit()
cnx.close()
"nabil@eml.cc"
] | nabil@eml.cc |
4a7b9dedc846fd6a43c1ad6607a1221c06a5c760 | 5226d62f0ce18a86977b50251e7da42a9127cfb2 | /VAE/GUI.py | 7661595c6473d599dfed040fc5d6cfc9685e902b | [] | no_license | xutianyish/APS360-Face_generator | 6bfb1a55a5013f208823e096252d04e95a1ccea6 | 18a2f0dd4e08d1a34d566f77b30378cffa8d14e1 | refs/heads/master | 2022-04-10T03:23:58.344243 | 2020-03-15T17:33:23 | 2020-03-15T17:33:23 | 247,515,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | import random
import matplotlib
import pickle
import numpy as np
import tkinter as Tk
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from VAE_GAN import Encoder, Decoder, VAE
from data_loader import raw_loader
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import torch
'''
TODO: fix comments
This function is called everytime the slider values are changed
The all the slider values are stored in a list called "val", and
a face will be generated based on "val" and plotted on the canvas
'''
def get_model():
    '''
    Load the trained VAE and its fitted StandardScaler/PCA artifacts from a
    hard-coded checkpoint folder.

    Returns (model, scaler, pca_components, pca_mean, pca).
    TODO: parameterize all of this
    '''
    latent = 500
    dilation=20
    # NOTE(review): the Linux path below is immediately overwritten by the
    # Windows path -- only the second assignment takes effect.
    folder = '/home/osvald/Projects/APS360/APS360_Project/VAE/GAN_models/l_200_df_16_kld_0.5_b1_0.5_b2_0.999_lr_0.001_g_0.99_db_2_gw_1.5'
    folder = "C:\\Users\\osval\\Documents\\School\\APS360\\APS360_Project\\VAE\\VAE_models\\l_500_df_20_kld_0.01_b1_0.9_b2_0.999_lr_0.001_g_0.99"
    state = '\\model_epoch150'
    model = VAE(d_factor=dilation, latent_variable_size=latent, cuda=False, activation='SELU').to('cpu')
    model.load_state_dict(torch.load(folder + state, map_location='cpu'))
    model.eval()
    # Pickled preprocessing artifacts fitted on the latent space.
    sc = pickle.load(open(folder+'\\std_scaler500.p', 'rb'))
    pca = pickle.load(open(folder+'\\pca500.p', 'rb'))
    pca_components = pickle.load(open(folder+'\\components500.p', 'rb'))
    pca_mean = pickle.load(open(folder+'\\mean500.p', 'rb'))
    return model, sc, pca_components, pca_mean, pca
model, sc, pca_components, pca_mean, pca = get_model()
def update(val):
    # Matplotlib slider callback: rebuild the principal-component vector
    # from all 30 sliders and redraw the decoded face.
    # NOTE(review): the incoming *val* (the changed slider's value) is
    # discarded and rebuilt from every slider -- confirm this is intended.
    val = []
    for i in range(30):
        val.append(s_time[i].val)
    with torch.no_grad():
        # (C, H, W) -> (H, W, C) layout for imshow.
        image = pca_to_img(val).squeeze().permute(1,2,0)
    ax.imshow(image)
# --- Tk window + embedded matplotlib canvas setup ---
matplotlib.use('TkAgg')
root = Tk.Tk()
root.wm_title("VAE face generation")
fig = plt.Figure()
canvas = FigureCanvasTkAgg(fig, root)
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#TODO: fix problem of PCA output differing from VAE reconstruction
# likely to do with reparameterization - try sampling from Gaussian w/ std logvar
#TODO: try with more PCs but just list the first 20
#TODO: SR cycleGAN
# Pull one sample so the GUI starts from a real face: encode it, project the
# latent mean onto the PCA axes (the slider coordinates), decode a preview.
loader = raw_loader(batch_size=1)
with torch.no_grad():
    for data, _ in loader:
        inputs = data
        mu, logvar = model.encode(inputs)
        latent = sc.transform(mu)# + logvar)
        pc = pca.transform(mu)#np.dot(latent-pca_mean, np.transpose(pca_components))
        #z = np.dot(pc[0], pca_components) + pca_mean
        #z = sc.inverse_transform(z)
        image = model.decode(torch.Tensor(mu)).squeeze().permute(1,2,0)
        #TODO: reshape pc to remove batch dim
        break
def pca_to_img(val, pc=pc[0], model=model, sc=sc, components=pca_components, mean=pca_mean):
    '''
    Transforms principal components back to a decoded image: overwrite the
    leading PCs with the slider values, invert the PCA projection and the
    standard scaling, then decode through the VAE.

    NOTE(review): *pc* is a mutable default bound to the sample's PC vector,
    so slider edits persist across calls while the untouched components stay
    fixed -- presumably intentional; confirm.
    '''
    for i in range(len(val)):
        pc[i] = val[i]
    z = np.dot(pc, components) + mean
    z = sc.inverse_transform(z)
    img = model.decode(torch.Tensor(z))
    return(img)
# Preview axes on the right half; start from the encoded sample image.
ax=fig.add_subplot(122)
ax.imshow(image)
# One slider per principal component (first 30), initialised to the sample's
# projection so the preview matches the starting face.
s_time = []
for i in range (30):
    ax_time = fig.add_axes([0.05, 0.1+0.03*i, 0.4, 0.02])
    s_time.append(Slider(ax_time, str(i), -10, 10, valinit=pc[0,i]))
    s_time[i].on_changed(update)
Tk.mainloop()
| [
"noreply@github.com"
] | xutianyish.noreply@github.com |
425319ec3b336304124a9f9fa80717eb5bd98271 | 0c1bc3cfe6218e255370a2ad50a163749ac99ae8 | /Day-5/Program-2.py | 7fc2f80c4f49a80ded84da5e7c5259920b5abc4a | [] | no_license | mukund2000/week-of-code | 01cb32e8d8f128c7beb5e77f2f87994418d95213 | 949e1c8653635f67397fcb3d62af42354248274d | refs/heads/master | 2022-09-15T17:35:22.321806 | 2020-05-30T17:13:41 | 2020-05-30T17:13:41 | 264,487,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 22 16:36:40 2020
@author: Mukund Rastogi
"""
def ReplaceWithGreatest(List):
    """Replace each element except the last with the maximum of the
    elements to its right, in place, and return the same list.

    A single right-to-left scan with a running maximum makes this O(n)
    instead of the original O(n^2) repeated max-of-suffix computation.
    """
    if len(List) < 2:
        # Nothing lies to the right of any element; empty/singleton unchanged.
        return List
    running_max = List[-1]
    for i in range(len(List) - 2, -1, -1):
        original = List[i]
        List[i] = running_max
        if original > running_max:
            running_max = original
    return List
# Read the list size (not used by ReplaceWithGreatest itself) and the
# elements, then print the transformed list.
n=int(input("Enter the size of list"))
List=list(map(int,input("Enter the elements in list: ").split()))
print("After sorting The list is: ",ReplaceWithGreatest(List))
"mukundrastogixyz@gmail.com"
] | mukundrastogixyz@gmail.com |
66fb33b0030c894e919d60edb6cc528e910809b4 | 8cce0b5a4be09783016906a36192c52e9daa84aa | /equipment_engineering/meter_reader_4_pointer/main.py | d3851c2d25d932f0aa84ba3adc0c8e32b8fd3a3b | [
"MIT"
] | permissive | Castrol68/opencv-practice | fcc9495553d3a10fb045c396697391a5d2a06f36 | 83d76132d004ebbc96d99d34a0fd3fc37a044f9f | refs/heads/master | 2023-08-31T07:18:51.497902 | 2020-05-03T17:43:12 | 2020-05-03T17:43:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import abc
from abc import ABC
import argparse
import os
class EquipmentRunner(ABC):
    """Chain-of-responsibility base: each handler either serves the request
    or hands it to its successor via the ``next`` property.

    Fix: the successor property used to be called ``mode`` while every
    subclass and ``fork()`` used a plain ``.next`` attribute, so the
    unset-chain guard below never ran; the property is now ``next`` (with
    ``mode`` kept as a backward-compatible alias).
    """

    def __init__(self):
        # Successor handler in the chain; unset until wired via ``next``.
        self.__successor = None

    @property
    def next(self):
        """Return the successor handler; abort (exit code 404) when the
        chain was never wired, instead of failing later."""
        if not self.__successor:
            exit(404)
        return self.__successor

    @next.setter
    def next(self, successor):
        self.__successor = successor

    # Backward-compatible alias for the old property name.
    mode = next

    @abc.abstractmethod
    def run(self, request):
        """Handle *request* (a dict of CLI flags) or delegate to ``self.next``."""
        ...
class RunSettingMode(EquipmentRunner):
    # Chain handler: interactive configuration of the gauge detector.
    def run(self, request):
        # Triggered by the -s/--set CLI flag; otherwise pass the request on.
        if True is request["set"]:
            try:
                log("设置模式 ...")
                # Prompt for the dial's angle range, value range and unit.
                min_angle = input_number_check("表盘最小值对应的刻度")
                max_angle = input_number_check("表盘最大值对应的刻度")
                min_value = input_number_check("表盘最小值")
                max_value = input_number_check("表盘最大值")
                util = input("仪表单位: ")
                set_detector_argument(min_angle, max_angle, min_value, max_value, util)
            except Exception as e:
                log(e, ERROR)
        else:
            self.next.run(request)
class RunDebugMode(EquipmentRunner):
    # Chain handler: run the reader with debugging output enabled (-d).
    def run(self, request):
        if True is request["debug"]:
            try:
                log("调试模式 ...")
                start_with_debug()
            except Exception as e:
                log(e, ERROR)
        else:
            self.next.run(request)
class RunVisionMode(EquipmentRunner):
    # Terminal chain handler: windowed (visual) mode (-w). It has no
    # successor, so an unmatched request simply ends here.
    def run(self, request):
        if True is request["windows"]:
            try:
                log("可视化模式 ...")
                start_with_vision()
            except Exception as e:
                log(e, ERROR)
class RunBackendMode(EquipmentRunner):
    # Chain handler: headless background mode (-b).
    def run(self, request):
        if True is request["backend"]:
            try:
                log("后台模式 ...")
                start_with_backend()
            except Exception as e:
                log(e, ERROR)
        else:
            self.next.run(request)
def fork():
    # Wire the chain of responsibility (setting -> debug -> backend ->
    # vision) and feed it the parsed CLI flags.
    # NOTE(review): relies on the module-level global ``args`` assigned in
    # the __main__ guard -- fork() only works when run as a script.
    setting_mode = RunSettingMode()
    debug_mode = RunDebugMode()
    vision_mode = RunVisionMode()
    backend_mode = RunBackendMode()
    setting_mode.next = debug_mode
    debug_mode.next = backend_mode
    backend_mode.next = vision_mode
    # Daemonization was attempted here; kept commented out for reference.
    # try:
    #     os.chdir("/tmp")
    #     os.setsid()
    #     os.umask(0)
    setting_mode.run(args)
    # except OSError:
    #     pass
if "__main__" == __name__:
    # NOTE(review): these imports live inside the main guard, so the helper
    # names used by the Run*Mode classes (log, start_with_*, etc.) are only
    # defined when executed as a script -- importing this module elsewhere
    # would leave them unresolved.
    from reader_4_pointer import start_with_vision, start_with_debug, set_detector_argument, start_with_backend
    from reader_4_pointer import version, log, ERROR, input_number_check
    version()
    ap = argparse.ArgumentParser()
    # NOTE(review): argparse type=bool treats any non-empty string (even
    # "False") as True -- confirm the flags behave as intended.
    ap.add_argument("-d", "--debug", type=bool, help=" debug模式", default=False)
    ap.add_argument("-s", "--set", type=bool, help="设置模式", default=False)
    ap.add_argument("-w", "--windows", type=bool, help="可视化模式", default=True)
    ap.add_argument("-b", "--backend", type=bool, help="后台模式", default=False)
    ap.add_argument("-p", "--path", help="日志存放位置")
    args = vars(ap.parse_args())
    fork()
| [
"afterloe@foxmail.com"
] | afterloe@foxmail.com |
09c541212c844618f2b459be9a4d1d7638285512 | 794c0856478e25e772c356f4278754b45dc0b35e | /speedtest.py | 8c584b3d368a26f4d3784fa62bfd081a9a907e5e | [] | no_license | MMotohiro/code_of_Atcoder | 4bb06fc209052089b4898d5386c22910463a1fcb | faf136716c5951a1cb3bd5ee43060ccfbf2f2188 | refs/heads/master | 2020-09-04T13:38:34.510416 | 2019-11-05T13:09:15 | 2019-11-05T13:09:15 | 219,746,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | import time
import random
N, M = map(int, input().split()) # graph with N vertices, M random accesses
#LRD = [[random.randrange(0, N), random.randrange(0, N),random.randrange(0, N)] for i in range(M)]
# Fixed (L, R, D) triples so every run touches the same cells.
LRD = [[5,5,5] for i in range(M)]
# Describe the benchmark parameters N and M.
print(f"{N}x{N}配列,{M}回ランダムアクセス")
print()
# Benchmark 1: nested (N x N) list of lists.
if N <= 10000:
    print("リストオンリー")
    start = time.time()
    graph = [[0 for _ in range(N)] for _ in range(N)]
    elapsed_time = time.time() - start
    print(f"初期化:{elapsed_time:6f}秒")
    start = time.time()
    for L, R, D in LRD:
        graph[L][R] = D
        #graph[R][L] = -D
    elapsed_time = time.time() - start
    print(f"ランダムアクセス:{elapsed_time:6f}秒")
    print()
# Benchmark 2: single flat list of length N*N.
print(f"{N*N}x{1}配列,{M}回ランダムアクセス")
print("リストオンリー")
start = time.time()
graph = [0 for _ in range(N*N)]
elapsed_time = time.time() - start
print(f"初期化:{elapsed_time:6f}秒")
start = time.time()
for L, R, D in LRD:
    # NOTE(review): a flattened 2-D index is normally graph[L*N + R];
    # graph[L*R] only coincides with it in special cases -- confirm
    # (for timing purposes any in-range index measures the same cost).
    graph[L*R] = D
    #graph[R][L] = -D
elapsed_time = time.time() - start
print(f"ランダムアクセス:{elapsed_time:6f}秒")
print()
del start
del graph
| [
"noreply@github.com"
] | MMotohiro.noreply@github.com |
4af73eaabb0d414ab32303393cf915677d331707 | c940c2477cd90bb6702760688a0ac11c36184e8d | /blog/views.py | f15925975f732b9e57b368c50de6f0404e51c279 | [] | no_license | hotdog45/myblog | 4bd4e4f65f6aad8839d8d970ea9e01ba885b8843 | 768d645c0f3f7ab7fdadd83208d33128417852dd | refs/heads/master | 2020-12-02T22:40:11.014410 | 2017-07-18T17:16:57 | 2017-07-18T17:16:57 | 96,162,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render
# from django.http import HttpResponse
from . import models
def index(request):
    # Home page: list every article.
    articles = models.Article.objects.all()
    return render(request,'index.html',{'articles':articles})
def article_page(request,article_id):
    # Detail page for a single article, looked up by primary key.
    article = models.Article.objects.get(pk=article_id)
    return render(request,'article_page.html',{'article':article})
def edit_page(request,article_id):
    # Edit form; article_id '0' means "create a new article" (blank form).
    if str(article_id) == '0':
        return render(request,'edit_page.html')
    article = models.Article.objects.get(pk=article_id)
    return render(request,'edit_page.html',{'article':article})
def edit_action(request):
    # Handle the edit-form POST: create a new article when article_id is
    # '0', otherwise update the existing one and show its detail page.
    title = request.POST.get('title','TITLE')
    content = request.POST.get('content','CONTENT')
    article_id = request.POST.get('article_id',0)
    if article_id == '0':
        models.Article.objects.create(title=title,content=content)
        articles = models.Article.objects.all()
        return render(request,'index.html',{'articles':articles})
    article = models.Article.objects.get(pk=article_id)
    article.title = title
    article.content = content
    article.save()
    return render(request,'article_page.html',{'article':article})
"mac2020@126.com"
] | mac2020@126.com |
df75c53b7d981cf6d3e3ab0994eb1d59a0352387 | 5fe0a5588d27dfcd98e1ebedbf8a2138dcb33117 | /catkin_ws/build/hebi_motor/catkin_generated/pkg.develspace.context.pc.py | b25b5e76901979d9047e62786209734c711542fd | [] | no_license | Florian9638/HEBI_ros | 370511dbeca00c698afea8a969960094b1678930 | 7f7b46b329aec043019a79d7e67fe1c821f20684 | refs/heads/main | 2023-02-06T04:19:48.506795 | 2020-12-15T21:16:13 | 2020-12-15T21:16:13 | 319,692,741 | 1 | 0 | null | 2020-12-15T21:16:14 | 2020-12-08T16:14:07 | Makefile | UTF-8 | Python | false | false | 532 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (see header above); values are
# substituted by CMake at configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/florian/Aigritec/X8-9/catkin_ws/devel/include".split(';') if "/home/florian/Aigritec/X8-9/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "hebi_cpp_api;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hebi_motor"
PROJECT_SPACE_DIR = "/home/florian/Aigritec/X8-9/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"florian.dallago@aigritec.com"
] | florian.dallago@aigritec.com |
0ecfd4ade7a4eb7a0332e62085aa20d4d147faea | 0550c08cee19be891fde34fa109b5a4ad9f07e3a | /countingdnanucleotides/countingdnanucleotides.py | f3f714d93042c72c14090a87793d10895a8e4cca | [] | no_license | bendavidsteel/rosalind-solutions | 92653c49d8ef938306ac1289ccb4e4cfe4b8d3ae | 0749f2662efcac62383a8476ce13fcdd039928b1 | refs/heads/master | 2020-03-28T04:17:00.959446 | 2018-09-06T21:32:06 | 2018-09-06T21:32:06 | 147,705,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | a = 0
from collections import Counter

# Count the four DNA nucleotides in the first line of the Rosalind dataset
# and write the counts as "A C G T", space separated, to output.txt.
# Counter replaces the original per-character if/elif tallying, and 'with'
# guarantees the output handle is closed even if the write fails.
with open('rosalind_dna.txt') as stringfile:
    s = [x.strip('\n') for x in stringfile.readlines()][0]
counts = Counter(s)
with open("output.txt", 'w') as output:
    output.write(' '.join(str(counts[base]) for base in 'ACGT'))
| [
"bendavidsteel@gmail.com"
] | bendavidsteel@gmail.com |
6fd83ce906f2fcd72c9fb358506d161faa340b81 | fe5ac5699513ad6375d1ea2d1b3ae46bdc644009 | /pytritex/graph_utils/branch_remover.py | eb19454803362d5dca497b6d0df5b422538f85e4 | [] | no_license | lucventurini/pytritex | ec8a9aba11bf688b073db2e5012081605e291c66 | 2bfde1dedd4d14584f04fe430fbb4401e481b3ad | refs/heads/master | 2023-03-27T08:45:44.104355 | 2020-08-27T09:12:12 | 2020-08-27T09:12:12 | 260,050,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,258 | py | from pytritex.graph_utils.make_super_scaffolds import make_super_scaffolds, add_missing_scaffolds, add_statistics
import time
from dask.distributed import Client
import dask.dataframe as dd
import logging
import os
from functools import partial
import numpy as np
logger = logging.getLogger("distributed.worker")
def iteration(counter, membership, excluded, links, save_dir, client, info, ncores):
    """One round of super-scaffolding: build scaffolds with the current
    exclusion set, then extend ``excluded`` with the rank-0 backbone
    scaffolds of any super-scaffold that still contains rank > 1 members.

    Returns (make_super_scaffolds output, updated excluded set, run flag);
    ``run`` is False when no further backbone scaffolds were excluded,
    i.e. a fixed point was reached.
    """
    run = True
    logger.warning("%s Starting run %s , excluded: %s",
                   time.ctime(), counter, len(excluded))
    out = make_super_scaffolds(links=links, save_dir=save_dir,
                               client=client, membership=membership,
                               info=info, excluded=excluded, ncores=ncores,
                               to_parquet=False)
    membership = out["membership"]
    # dd_membership = dd.read_parquet(membership, infer_divisions=True)
    # Now we have to exclude from consideration those scaffolds
    # that are the backbone of super-scaffolds where there is at least a scaffold with
    # rank > 1.
    # Ie: remove the backbones so that the branches can be reassigned to somewhere else.
    a = membership.merge(
        membership.loc[membership["rank"] > 1, ["super", "bin"]].reset_index(drop=False).drop_duplicates(),
        how="inner", on=["super", "bin"])
    assert "scaffold_index" in a.columns
    # Backbone members (rank 0) that share a (super, bin) with a rank>1 spike.
    add = a.loc[a["rank"] == 0, :]
    if add.shape[0].compute() == 0:
        run = False
    else:
        # Drop all the links between the backbone of the "fuzzy" scaffolds and the spikes.
        excluded.update(set(add["scaffold_index"].values.compute().tolist()))
        assert len(excluded) > 0
        logger.warning("%s Run %s excluding %s", time.ctime(), counter, len(excluded))
    logger.warning("Finished run %s", counter)
    return out, excluded, run
def _initial_branch_remover(client: Client,
                            save_dir: str,
                            links: str,
                            info: str, excluded: set, ncores):
    """Iteratively exclude branch 'backbones' (via iteration()) until the
    scaffold graph is clean, then write the final membership/result tables
    to parquet.

    ``links`` and ``info`` are paths to parquet datasets. Returns a dict of
    output parquet paths plus the final exclusion set.
    """
    print(time.ctime(), "Starting the run")
    if excluded is None:
        excluded = set()
    links = dd.read_parquet(links, infer_divisions=True, engine="pyarrow")
    info = dd.read_parquet(info, infer_divisions=True, engine="pyarrow")
    # Restrict the info table to scaffolds actually present in the link graph.
    scaffolds_to_use = np.unique(links[["scaffold_index1", "scaffold_index2"]].values.compute().flatten())
    info_to_use = info.loc[scaffolds_to_use]
    _iterator = partial(iteration,
                        links=links,
                        save_dir=save_dir,
                        client=client,
                        info=info_to_use,
                        ncores=ncores)
    counter = 1
    out, excluded, run = _iterator(counter=counter, membership=None, excluded=excluded)
    membership = out["membership"]
    # Repeat until iteration() reports a fixed point (run is False).
    while run is True:
        counter += 1
        out, excluded, run = _iterator(counter=counter,
                                       membership=membership,
                                       excluded=excluded)
        # new_add["super"] = new_add["super"] + max_add_super
        # add = dd.concat([add, new_add]).persist()
        # max_add_super = add["super"].max().compute()
        membership = out["membership"]
    # Now we need to rejoin things
    maxidx = out["membership"]["super"].max().compute()
    # add["super"] = add["super"] + maxidx
    # out["membership"] = dd.concat([out["membership"], add]).persist()
    out["membership"] = add_missing_scaffolds(info, out["membership"],
                                              maxidx, excluded, client, save_dir)
    out["membership"], out["info"] = add_statistics(out["membership"], client)
    dd.to_parquet(out["membership"], os.path.join(save_dir, "membership"),
                  compute=True, compression="gzip", engine="pyarrow", schema="infer")
    # res = dd.from_pandas(res, chunksize=1000)
    dd.to_parquet(out["info"], os.path.join(save_dir, "result"), compute=True,
                  compression="gzip", engine="pyarrow", schema="infer")
    # Hand back the on-disk paths (not dataframes) so callers reload lazily.
    out = {"membership": os.path.join(save_dir, "membership"),
           "info": os.path.join(save_dir, "result")}
    return out, excluded
| [
"lucventurini@gmail.com"
] | lucventurini@gmail.com |
7d2d02d8ebc5d63a3b86766ce7a466835da3c7fb | 16caebb320bb10499d3712bf0bdc07539a4d0007 | /objc/_SleepHealth.py | b5472aa777bd2319957d026862d02d97921f506b | [] | no_license | swosnick/Apple-Frameworks-Python | 876d30f308a7ac1471b98a9da2fabd22f30c0fa5 | 751510137e9fa35cc806543db4e4415861d4f252 | refs/heads/master | 2022-12-08T07:08:40.154553 | 2020-09-04T17:36:24 | 2020-09-04T17:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | '''
Classes from the 'SleepHealth' framework.
'''
try:
    from rubicon.objc import ObjCClass
except (ImportError, ValueError):
    # rubicon-objc is unavailable (or failed to initialise): fall back to a
    # stub so every class lookup resolves to None instead of crashing.
    # Fix: a missing rubicon package raises ImportError, which the original
    # `except ValueError` did not catch, so the fallback never engaged.
    def ObjCClass(name):
        return None
def _Class(name):
    # Resolve an Objective-C class handle by name; NameError (raised when
    # the class cannot be found) maps to None.
    try:
        return ObjCClass(name)
    except NameError:
        return None
# Handles to the private SleepHealth framework classes (None when the
# class -- or rubicon itself -- is unavailable).
HKSHSleepDaySummary = _Class('HKSHSleepDaySummary')
HKSleepHealthStore = _Class('HKSleepHealthStore')
HKSHSleepPeriod = _Class('HKSHSleepPeriod')
HKSHSleepPeriodSegment = _Class('HKSHSleepPeriodSegment')
HKSHGoalProgressEngine = _Class('HKSHGoalProgressEngine')
HKSHGoalProgress = _Class('HKSHGoalProgress')
HKSHSleepDaySummaryQuery = _Class('HKSHSleepDaySummaryQuery')
HKSHSleepDaySummaryQueryConfiguration = _Class('HKSHSleepDaySummaryQueryConfiguration')
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
297cab4b6c1665c38f678ba78f9e13312fea5f00 | 00d5c1aa69da3b388d91761497cd17ae2c1bb91d | /tests/elephunk/ui_methods_test.py | c5c8985961fd15eb5277a56b3fc9392d6ae89250 | [
"MIT"
] | permissive | pitluga/elephunk | e33c75f6d4f60e14145543834b722e35ad0c335f | 98649bed91a86b86e008b84093cf85b2d6612b3b | refs/heads/master | 2021-07-13T16:54:54.261255 | 2013-12-18T14:26:11 | 2013-12-18T14:26:11 | 6,147,046 | 0 | 0 | MIT | 2021-03-25T21:56:15 | 2012-10-09T20:14:04 | Python | UTF-8 | Python | false | false | 356 | py | import unittest
from elephunk.ui_methods import *
class HelpersTest(unittest.TestCase):
    """Unit tests for the percent() UI helper.

    Uses assertEqual: assertEquals is a deprecated alias that was removed
    in Python 3.12.
    """
    def test_percent(self):
        # Whole-number ratios render without decimals.
        self.assertEqual("40%", percent(None, 2,5))
    def test_percent_with_decimal(self):
        # Non-terminating ratios are rounded to two decimal places.
        self.assertEqual("33.33%", percent(None, 1,3))
    def test_infinity(self):
        # Division by zero is reported as "infinity" rather than raising.
        self.assertEqual("infinity", percent(None, 1,0))
| [
"tony.pitluga@gmail.com"
] | tony.pitluga@gmail.com |
0c78464536bc013f9259bddca14b65ba53fc0439 | 3aa2ae88c93b39df4c4c1e9a6e6869fb40350bb3 | /WeiboLike.py | b3c9382a90faa038b5baab890dd42154b7845ff1 | [] | no_license | ycd2003/BulkIndex | 73488e0ebbd4e00aa827882dde8a93e9431c5c02 | 1760bbc408c50ac714bd20949cb88ea2eef36386 | refs/heads/master | 2020-12-15T08:22:15.751894 | 2019-03-13T05:36:53 | 2019-03-13T05:36:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | # -*- coding:utf-8 -*-
from HbaseTools import HbaseInfoTask
from RedisTools import RedisTools
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import time
import logging
from conf import ES_ADDR,COUNT_NUM
logging.basicConfig(filename='log/weibo_like.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(module)s :%(message)s',
datefmt='%Y-%m-%d %H:%M:%S %p', level=logging.WARNING)
class GetWeiboLike(object):
    """Sync pipeline: pull weibo-like rowkeys from Redis, fetch the rows
    from HBase and bulk-index them into the Elasticsearch 'wb_like' index.
    """
    def __init__(self):
        self.hbase_con = HbaseInfoTask()
        self.redis_con = RedisTools()
        self.es = Elasticsearch(ES_ADDR)
    def es_ping(self):
        # Re-create the client when the cluster stops answering pings.
        if not self.es.ping():
            self.es = Elasticsearch(ES_ADDR)
    def run(self):
        """Consume rowkeys forever, flushing bulk batches by size
        (COUNT_NUM) or by age (~30 seconds). Never returns."""
        action_list = []
        count = 0
        start = int(time.time())
        cunzai = 0  # number of already-indexed (update) docs in this batch
        while True:
            rowkey = self.redis_con.get_rowkey("wb_like")
            if rowkey == None:
                # Queue drained: flush anything pending, then back off.
                if len(action_list) > 0:
                    logging.warning("重复存入elasticsearch当中%d条数据" % cunzai)
                    cunzai = 0
                    self.commit(action_list)
                    action_list.clear()
                    start = int(time.time())
                    count = 0
                time.sleep(10)
                continue
            param = None
            # "rowkey|||||c1,c2,..." optionally carries a column list.
            if "|||||" in rowkey:
                params = rowkey.split("|||||")[1]
                param = params.split(",")
                rowkey = rowkey.split("|||||")[0]
            boo = self.es.exists("wb_like", "sino", rowkey)
            action = {
                "_index": "wb_like",
                "_type": "sino",
                "_id": "",
            }
            if boo:
                # Document already indexed: issue a partial update.
                map = self.hbase_con.getResultByRowkey("WEIBO_LIKE_TABLE", rowkey, "wb_like",param)
                if not map:
                    continue
                cunzai = cunzai + 1
                action["_op_type"] = "update"
                action['doc'] = map
            else:
                # New document: index the full row.
                map = self.hbase_con.getResultByRowkey("WEIBO_LIKE_TABLE", rowkey, "wb_like")
                if not map:
                    continue
                action['_source'] = map
            action['_id'] = rowkey
            action_list.append(action)
            end = int(time.time())
            count = count + 1
            # Flush when the batch is full or roughly 30 seconds old.
            if count > COUNT_NUM or (end-start) > 30:
                logging.warning("重复存入elasticsearch当中%d条数据" % cunzai)
                cunzai = 0
                if len(action_list) > 0:
                    self.es_ping()
                    self.commit(action_list)
                start = int(time.time())
                count = 0
                action_list.clear()
    def commit(self,action_list):
        # Bulk-index; on failure, log and retry once (a second failure
        # propagates to the caller).
        try:
            helpers.bulk(self.es, action_list)
        except Exception as e:
            log_info = "index:wb_like,\terror:" + str(e)
            logging.error(log_info)
            helpers.bulk(self.es, action_list)
        logging.warning("提交成功:%d条数据" % len(action_list))
if __name__=="__main__":
    # Start the sync loop (run() never returns).
    getWeiboLike = GetWeiboLike()
    getWeiboLike.run()
"416713448@qq.com"
] | 416713448@qq.com |
da4dcc87474cb0400f18d2293569fa8d6e209747 | 1a9852fe468f18e1ac3042c09286ccda000a4135 | /Specialist Certificate in Data Analytics Essentials/DataCamp/06-Writing_Functions_in_Python/e11_a_read-only_open_context_manager.py | 956cc65e4b032f0ee852b2821b8fb559599e271b | [] | no_license | sarmabhamidipati/UCD | 452b2f1e166c1079ec06d78e473730e141f706b2 | 101ca3152207e2fe67cca118923896551d5fee1c | refs/heads/master | 2023-08-14T15:41:24.312859 | 2021-09-22T17:33:01 | 2021-09-22T17:33:01 | 386,592,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | """
A read-only open() context manager
Yield control from open_read_only() to the context block, ensuring that the read_only_file object
gets assigned to my_file.
Use read_only_file's .close() method to ensure that you don't leave open files lying around.
"""
from contextlib import contextmanager
import time
@contextmanager
def open_read_only(filename):
"""Open a file in read-only mode.
Args:
filename (str): The location of the file to read
Yields:
file object
"""
read_only_file = open(filename, mode='r')
# Yield read_only_file so it can be assigned to my_file
yield read_only_file
# Close read_only_file
read_only_file.close()
with open_read_only('my_file.txt') as my_file:
print(my_file.read())
| [
"b_vvs@yahoo.com"
] | b_vvs@yahoo.com |
f9f887d420ccb38721d63c359a51f6cdb77c8b91 | 579a34f1f705d627bc3f7b504206fcc04d58d1bf | /sample_app/main.py | 54dcd7d3f82215de925ced4c5621ac284b44eb58 | [] | no_license | aldricaj/jenkins-test-repo | b5ae6216d25b52e7ac2c8a73724cf0da79b92998 | 50f2d900afea05fe3ab4a8da22986400c45df931 | refs/heads/master | 2021-06-10T19:53:22.121251 | 2019-05-30T14:45:45 | 2019-05-30T14:45:45 | 138,117,009 | 0 | 0 | null | 2021-03-19T23:32:32 | 2018-06-21T03:56:48 | Python | UTF-8 | Python | false | false | 201 | py |
import flask_routes as routes
from gevent.wsgi import WSGIServer
def main():
    # Serve the Flask routes on all interfaces, port 5080, via gevent.
    # NOTE(review): gevent.wsgi was removed in gevent 1.3 in favour of
    # gevent.pywsgi -- confirm the pinned gevent version still provides it.
    HTTP_SERVER = WSGIServer(('',5080), routes.app)
    HTTP_SERVER.serve_forever()
if __name__ == "__main__":
    main()
"aldricaj@mail.uc.edu"
] | aldricaj@mail.uc.edu |
7989f16a8528778f1e35a6f8b1fb39089e4811e0 | 01ec3bd746e0a99e795e7758a27a1745368d2857 | /taskmate/urls.py | 8b265dc6de4c5caa4c2a33b2451634fb78d9a098 | [] | no_license | Gaurav4code/House_price_prediction | 1b23f7cf8f3b6bc7150f2a5524a3dbc838c223fc | afe558c57152ebbbcbaae2a3acff5e86c5f5ba1b | refs/heads/master | 2023-06-26T10:02:49.798201 | 2021-07-19T12:23:58 | 2021-07-19T12:23:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.contrib import admin
from django.urls import path, include
from todolist_app import views as todolist_views
# URL routes: admin site, home page, the todolist app's own urls, and two
# static pages (contact, about).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', todolist_views.index, name='index'),
    path('todolist/', include('todolist_app.urls')),
    path('contact', todolist_views.contact, name='contact'),
    path('about-us', todolist_views.about, name='about'),
]
| [
"shubhamsarda99@yahoo.com"
] | shubhamsarda99@yahoo.com |
0b5bce0fba10d328e955aeba3ab3d9ea81866db8 | 0614a50bf1f90c0ebb96f85d224f4ff0b7821874 | /adaline.py | aa64a6ddc6c1a5328950edaada387fa1f861af56 | [] | no_license | husainshaikh895/Machine-Learning-Algorithms | e533890c9f341ff25d90b2ec3d81dc272670bf73 | a832a87881ece036dc5ad99dfbfa2d58ffb4a4c2 | refs/heads/master | 2020-05-03T08:39:22.682765 | 2020-01-08T12:16:06 | 2020-01-08T12:16:06 | 178,531,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class AdalineGD:
    """ADAptive LInear NEuron classifier trained with batch gradient descent.

    Minimizes the sum-of-squared-errors cost J(w) = 1/2 * sum((y - w.x)^2)
    by stepping the weights against the cost gradient each epoch:
    step = learn_rate * sum(y - activation(z)) * x, with z = x . w.
    """

    def __init__(self, epoch=100, learn_rate=0.01, random_state=1):
        # Updates are continuous (not thresholded), so more iterations are
        # typically needed to converge than with a Perceptron.
        self.epoch = epoch
        self.learn_rate = learn_rate
        self.random_state = random_state

    def fit(self, X, y):
        """Fit the weights to training matrix X (samples x features) and
        target vector y; records the per-epoch cost in ``self.cost_``."""
        rng = np.random.RandomState(self.random_state)
        # w_[0] is the bias unit; weights start near zero (mean 0, sd 0.01).
        self.w_ = rng.normal(loc=0.0, scale=0.01, size=X.shape[1] + 1)
        self.cost_ = []
        for _ in range(self.epoch):
            errors = y - self.activation(self.net_input(X))
            # Batch gradient-descent step: w += eta * X^T (y - output).
            self.w_[1:] += self.learn_rate * X.T.dot(errors)
            self.w_[0] += self.learn_rate * errors.sum()
            # Track J(w) = 1/2 * SSE so convergence can be plotted later.
            self.cost_.append((errors ** 2).sum() / 2.0)
        return self

    def net_input(self, X):
        """Compute the weighted net input z = X . w + bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity activation, kept to mirror how information flows in
        more complex networks; it could be omitted for Adaline."""
        return X

    def predict(self, X):
        """Return the class label (+1 / -1) after a unit-step threshold."""
        return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)
def main():
# A = AdalineGD()
# test example
# I have tried it with my own example to see how it works, and it does the job pretty well
# X = resident, 18+, male, married
X = np.array([[0,0,0,0],[0,0,0,1],[0,0,1,0],[0,0,1,1],[1,1,0,0],[1,0,1,0],[1,1,0,0]])
# y = can vote
y = np.array([-1, -1, -1, -1, 1, -1, 1])
'''
# train
A.fit(X, y)
# predict
X = np.array([[1,1,1,1],[1,1,1,0],[1,0,1,1],[0,1,1,1]])
print(A.predict(X))
'''
# Standardization
# x = (x - mean) / sd
# althought it is already standardised
X_std = X.copy()
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:,0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:,1].std()
X_std[:, 2] = (X[:, 2] - X[:, 2].mean()) / X[:,2].std()
X_std[:, 3] = (X[:, 3] - X[:, 3].mean()) / X[:,3].std()
# lets visualise the convergence
fig, ax = plt.subplots(nrows=1, ncols=2, figsize = (10,4))
ada1 = AdalineGD(epoch=50, learn_rate = 0.01).fit(X_std,y)
ax[0].plot(range(1, len(ada1.cost_)+1),
np.log10(ada1.cost_), marker = 'o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-Squared-Error)')
ax[0].set_title('Adaline learn_rate : 0.01')
ada2 = AdalineGD(epoch=50, learn_rate = 0.0001).fit(X_std,y)
ax[1].plot(range(1, len(ada1.cost_)+1),
np.log10(ada2.cost_), marker = 'o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Sum-Squared-Error)')
ax[1].set_title('Adaline learn_rate : 0.0001')
plt.show()
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | husainshaikh895.noreply@github.com |
d891d3619c03f925a4a0ca508048fb88fa43012e | cd72d36fa6196c4394df16cd9edcc935e209c8aa | /app/user/views.py | 47d3b399c743bc6b0407d5c6cfede940fdf2b852 | [] | no_license | sebastbonilla/recipe-app-api | 17a82563af4371093c20a86256f7c893db2c9545 | f48959b36a8745045b4cccc3326c20a7ef93924d | refs/heads/master | 2023-01-10T21:28:21.005262 | 2020-11-08T18:53:41 | 2020-11-08T18:53:41 | 296,969,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from .serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
"""Creates a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manages the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieves and returns authenticated user"""
return self.request.user
| [
"sebastbonilla@gmail.com"
] | sebastbonilla@gmail.com |
d3770de2ae95f5979154c27ae9ccd77472d0e4d2 | 0a6f284b1a7c8b16911ebf33076abc38778c752f | /app/run.py | 37911045108a9bf98a86cd405ae50114df2a13ca | [
"Apache-2.0"
] | permissive | branky/blockd3 | 2298b3eafd1b9c50b0374dd1456c0fcdf2068fab | 27e78fd89f44af95ad65b1203c02156db64333d0 | refs/heads/master | 2020-12-25T09:00:35.047437 | 2012-11-19T06:08:49 | 2012-11-19T06:08:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import logging
from ghost import GhostTestCase, Ghost
from app import make_app
app = make_app("test")
PORT = 5000
base_url = "http://localhost:%s/dist/" % PORT
class Blockd3GhostTest(GhostTestCase):
port = PORT
display = False
log_level = logging.INFO
def __new__(cls, *args, **kwargs):
"""Creates Ghost instance."""
if not hasattr(cls, 'ghost'):
cls.ghost = Ghost(display=cls.display,
wait_timeout=10,
viewport_size=cls.viewport_size,
log_level=cls.log_level)
return super(Blockd3GhostTest, cls).__new__(cls, *args, **kwargs)
@classmethod
def create_app(cls):
return app
def test_open(self):
"""
Test that the page loads
"""
page, resources = self.ghost.open(base_url)
self.assertEqual(page.url, base_url)
self.ghost.click("#run")
if __name__ == "__main__":
unittest.main() | [
"nick.bollweg@gmail.com"
] | nick.bollweg@gmail.com |
9e73460e1f735eeaa8a782f09d054c7d70dec0fa | d8ddc49fbfbe00bff0631ed05ddcaa9f1a10c3fd | /Data_Collection/test.py | ff9ce6ea3b08302ac987ed783b2f17b59f8129f7 | [] | no_license | reddit-analyzer/data_acq | 48d7bf8f186abc462a8f851b70a83edba2c473f3 | 2c04d42ddb4cbfcfe138508f464c295a22cf6c75 | refs/heads/master | 2016-09-06T11:16:51.036171 | 2015-12-15T22:19:57 | 2015-12-15T22:19:57 | 42,746,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,683 | py | <<<<<<< HEAD
import praw
import time
import csv
import re
=======
__author__ = 'vincentpham'
import praw
import time
>>>>>>> 1d0a2957910f479630465146050acfc2b876e927
#Comments data
r = praw.Reddit(user_agent='blah')
submissions = r.get_subreddit('aww').get_hot(limit=1)
y = [x.comments for x in submissions]
for x in submissions:
y = x
comment_object = y[0][0]
comment_str = comment_object.body #Get Comment
comment_usr = comment_object.author._case_name #Get User\
comment_upvotes = comment_object.score #Number of upvotes
comment_subreddit_id = comment_object.subreddit_id
comment_subreddit = comment_object.subreddit
comment_gilds = comment_object.gilded
comment_epoch_time = comment_object.created
comment_created = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(r.created))
comment_edited = comment_object.edited
e = [str(x) for x in submissions]
for x in submissions:
print x
user = r.get_redditor('ketralnis')
user.link_karma
user.comment_karma
user.fullname
s = r.get_submission('http://www.reddit.com/r/redditdev/comments/s3vcj/_/c4axeer')
your_comment = s.comments[0]
#thread data:
r = praw.Reddit(user_agent='blah')
submissions = r.get_subreddit('aww').get_hot(limit=1)
<<<<<<< HEAD
subreddit_name = 'aww'
def threadData(subreddit_name = 'aww', limit = 25):
#subreddit = r.get_subreddit(subreddit_name)
r = praw.Reddit(user_agent='blah')
submissions = r.get_subreddit(subreddit_name).get_hot(limit = limit)
list_items = [item for item in submissions]
thread_data = []
ranking = 0
for post in list_items:
subreddit_name = post.subreddit._case_name
reddit_usernames = post.author._case_name
total_num_comments = post.num_comments
post_timestamp_tmp = post.created
post_timestamp_final = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(post_timestamp_tmp))
domains = cleanDomain(post.domain)
gilded_score = post.gilded
post_score = post.score
thread_ids = post.id #matched with comment's domain called _submission_id
now_time = time.strftime("%Y-%m-%d %H:%M:%S %Z", time.localtime())
ranking += 1
thread_data.append([subreddit_name,
reddit_usernames,
thread_ids,
total_num_comments,
domains,
gilded_score,
post_score,
ranking,
post_timestamp_final,
now_time])
csvSave(thread_data)
return thread_data
def cleanDomain(domain_name):
if 'self.' in domain_name:
mapped_name = 'reddit.com'
return mapped_name
else:
return domain_name
def csvSave(list_of_list):
result_csv = open("testredditdata.csv", "w")
content = csv.writer(result_csv)#, delimiter = ',', quoting = csv.QUOTE_NONE, quotechar = '', lineterminator='\r\n')
for item in list_of_list:
content.writerow(item)
result_csv.close()
return "Saved"
test = threadData('aww')
#y = [x.comments for x in submissions]
y = []
for x in submissions:
y.append(x)
#REDDIT_USERNAME
for item in y:
try:
print item.author._case_name
except:
print "DELETED"
#TOTAL COMMENTS
for item in y:
try:
print item.num_comments
except:
print "DELETED"
#POST TIMESTAMP
for item in y:
try:
print item.created
except:
print "DELETED"
#SOURCE or DOMAIN
for item in y:
try:
print item.domain
except:
print "DELETED"
#GILDED
for item in y:
try:
print item.gilded
except:
print "DELETED"
#SCORE
for item in y:
try:
print item.score
except:
print "DELETED"
#THREAD ID
for item in y:
try:
print item.id
except:
print "DELETED"
=======
y = [x.comments for x in submissions]
for x in submissions:
y = x
>>>>>>> 1d0a2957910f479630465146050acfc2b876e927
author = y.author._case_name
created = y.created
domain = y.domain
edit_time = y.edited
gilded = y.gilded
media = y.media
media_embedded = y.media_embed #?
num_comments = y.num_comments
score = y.score
secured_media = y.secured_media #?
secure_media_embed = y.secure_media_embed
text = y.selftext_html
stickied = y.stickied
subreddit_name = y.subreddit._case_name
subreddit_id = y.subreddit_id
title = y.title
url = y.url
<<<<<<< HEAD
string_test = u'hello\nbye'
# test = re.sub(r'<[^>]blockquote>*<[^>]/blockquote>','', str(string_test))
test = re.sub(r'\n','', str(string_test))
=======
>>>>>>> 1d0a2957910f479630465146050acfc2b876e927
| [
"vincentpham@gmail.com"
] | vincentpham@gmail.com |
4037da61e9d0b78c4af2e78d472c172f4b190b68 | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/mpl_toolkits/axes_grid1/inset_locator.py | 49576ff9f37100ac6d33812731d5634d57e34693 | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,733 | py | """
A collection of functions and objects for creating or placing inset axes.
"""
from matplotlib import _api, docstring
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.patches import Patch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxTransformTo
from matplotlib.transforms import IdentityTransform, TransformedBbox
from . import axes_size as Size
from .parasite_axes import HostAxes
class InsetPosition:
@docstring.dedent_interpd
def __init__(self, parent, lbwh):
"""
An object for positioning an inset axes.
This is created by specifying the normalized coordinates in the axes,
instead of the figure.
Parameters
----------
parent : `matplotlib.axes.Axes`
Axes to use for normalizing coordinates.
lbwh : iterable of four floats
The left edge, bottom edge, width, and height of the inset axes, in
units of the normalized coordinate of the *parent* axes.
See Also
--------
:meth:`matplotlib.axes.Axes.set_axes_locator`
Examples
--------
The following bounds the inset axes to a box with 20%% of the parent
axes's height and 40%% of the width. The size of the axes specified
([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
>>> parent_axes = plt.gca()
>>> ax_ins = plt.axes([0, 0, 1, 1])
>>> ip = InsetPosition(ax, [0.5, 0.1, 0.4, 0.2])
>>> ax_ins.set_axes_locator(ip)
"""
self.parent = parent
self.lbwh = lbwh
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = BboxTransformTo(bbox_parent)
bbox_inset = Bbox.from_bounds(*self.lbwh)
bb = TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super().__init__(
loc, pad=0., child=None, borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
self.axes = ax
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, 0, 0, renderer)
bbox_canvas = Bbox.from_bounds(px, py, width, height)
tr = ax.figure.transFigure.inverted()
bb = TransformedBbox(bbox_canvas, tr)
return bb
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
super().__init__(
bbox_to_anchor, None, loc,
borderpad=borderpad, bbox_transform=bbox_transform
)
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
def get_extent(self, renderer):
bbox = self.get_bbox_to_anchor()
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = bbox.width * r + a * dpi
r, a = self.y_size.get_size(renderer)
height = bbox.height * r + a * dpi
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return width + 2 * pad, height + 2 * pad, xd + pad, yd + pad
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super().__init__(
bbox_to_anchor, None, loc, borderpad=borderpad,
bbox_transform=bbox_transform)
def get_extent(self, renderer):
bb = TransformedBbox(self.axes.viewLim, self.parent_axes.transData)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return (abs(bb.width * self.zoom) + 2 * pad,
abs(bb.height * self.zoom) + 2 * pad,
pad, pad)
class BboxPatch(Patch):
@docstring.dedent_interpd
def __init__(self, bbox, **kwargs):
"""
Patch showing the shape bounded by a Bbox.
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
Bbox to use for the extents of this patch.
**kwargs
Patch properties. Valid arguments include:
%(Patch_kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
super().__init__(**kwargs)
self.bbox = bbox
def get_path(self):
# docstring inherited
x0, y0, x1, y1 = self.bbox.extents
return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)],
closed=True)
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
"""
Helper function to obtain the location of a corner of a bbox
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
loc : {1, 2, 3, 4}
Corner of *bbox*. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
x, y : float
Coordinates of the corner specified by *loc*.
"""
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
"""
Helper function to obtain a Path from one bbox to another.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to use. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to use. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
path : `matplotlib.path.Path`
A line segment from the *loc1* corner of *bbox1* to the *loc2*
corner of *bbox2*.
"""
if isinstance(bbox1, Rectangle):
bbox1 = TransformedBbox(Bbox.unit(), bbox1.get_transform())
if isinstance(bbox2, Rectangle):
bbox2 = TransformedBbox(Bbox.unit(), bbox2.get_transform())
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
return Path([[x1, y1], [x2, y2]])
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect two bboxes with a straight line.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to draw the line. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to draw the line. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn. Valid arguments include:
%(Patch_kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
if 'fill' in kwargs:
super().__init__(**kwargs)
else:
fill = bool({'fc', 'facecolor', 'color'}.intersection(kwargs))
super().__init__(fill=fill, **kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
# docstring inherited
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
"""
Connect two bboxes with a quadrilateral.
The quadrilateral is specified by two lines that start and end at
corners of the bboxes. The four sides of the quadrilateral are defined
by the two lines given, the line between the two corners specified in
*bbox1* and the line between the two corners specified in *bbox2*.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1a, loc2a : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the first line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc1b, loc2b : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the second line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn:
%(Patch_kwdoc)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
super().__init__(bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
# docstring inherited
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1,
self.loc2b, self.loc1b)
path_merged = [*path1.vertices, *path2.vertices, path1.vertices[0]]
return Path(path_merged)
def _add_inset_axes(parent_axes, inset_axes):
"""Helper function to add an inset axes and disable navigation in it"""
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
@docstring.dedent_interpd
def inset_axes(parent_axes, width, height, loc='upper right',
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an inset axes with a given width and height.
Both sizes used can be specified either in inches or percentage.
For example,::
inset_axes(parent_axes, width='40%%', height='30%%', loc=3)
creates in inset axes in the lower left corner of *parent_axes* which spans
over 30%% in height and 40%% in width of the *parent_axes*. Since the usage
of `.inset_axes` may become slightly tricky when exceeding such standard
cases, it is recommended to read :doc:`the examples
</gallery/axes_grid1/inset_locator_demo>`.
Notes
-----
The meaning of *bbox_to_anchor* and *bbox_to_transform* is interpreted
differently from that of legend. The value of bbox_to_anchor
(or the return value of its get_points method; the default is
*parent_axes.bbox*) is transformed by the bbox_transform (the default
is Identity transform) and then interpreted as points in the pixel
coordinate (which is dpi dependent).
Thus, following three calls are identical and creates an inset axes
with respect to the *parent_axes*::
axins = inset_axes(parent_axes, "30%%", "40%%")
axins = inset_axes(parent_axes, "30%%", "40%%",
bbox_to_anchor=parent_axes.bbox)
axins = inset_axes(parent_axes, "30%%", "40%%",
bbox_to_anchor=(0, 0, 1, 1),
bbox_transform=parent_axes.transAxes)
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
width, height : float or str
Size of the inset axes to create. If a float is provided, it is
the size in inches, e.g. *width=1.3*. If a string is provided, it is
the size in relative units, e.g. *width='40%%'*. By default, i.e. if
neither *bbox_to_anchor* nor *bbox_transform* are specified, those
are relative to the parent_axes. Otherwise they are to be understood
relative to the bounding box provided via *bbox_to_anchor*.
loc : int or str, default: 1
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
a tuple of (0, 0, 1, 1) is used if *bbox_transform* is set
to *parent_axes.transAxes* or *parent_axes.figure.transFigure*.
Otherwise, *parent_axes.bbox* is used. If a tuple, can be either
[left, bottom, width, height], or [left, bottom].
If the kwargs *width* and/or *height* are specified in relative units,
the 2-tuple [left, bottom] cannot be used. Note that,
unless *bbox_transform* is set, the units of the bounding box
are interpreted in the pixel coordinate. When using *bbox_to_anchor*
with tuple, it almost always makes sense to also specify
a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
bbox_transform : `matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used. The value
of *bbox_to_anchor* (or the return value of its get_points method)
is transformed by the *bbox_transform* and then interpreted
as points in the pixel coordinate (which is dpi dependent).
You may provide *bbox_to_anchor* in some normalized coordinate,
and give an appropriate transform (e.g., *parent_axes.transAxes*).
axes_class : `matplotlib.axes.Axes` type, optional
If specified, the inset axes created will be created with this class's
constructor.
axes_kwargs : dict, optional
Keyworded arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes_kwdoc)s
borderpad : float, default: 0.5
Padding between inset axes and the bbox_to_anchor.
The units are axes font size, i.e. for a default font size of 10 points
*borderpad = 0.5* is equivalent to a padding of 5 points.
Returns
-------
inset_axes : *axes_class*
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
if bbox_transform in [parent_axes.transAxes,
parent_axes.figure.transFigure]:
if bbox_to_anchor is None:
_api.warn_external("Using the axes or figure transform requires a "
"bounding box in the respective coordinates. "
"Using bbox_to_anchor=(0, 0, 1, 1) now.")
bbox_to_anchor = (0, 0, 1, 1)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
if (isinstance(bbox_to_anchor, tuple) and
(isinstance(width, str) or isinstance(height, str))):
if len(bbox_to_anchor) != 4:
raise ValueError("Using relative units for width or height "
"requires to provide a 4-tuple or a "
"`Bbox` instance to `bbox_to_anchor.")
axes_locator = AnchoredSizeLocator(bbox_to_anchor,
width, height,
loc=loc,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
@docstring.dedent_interpd
def zoomed_inset_axes(parent_axes, zoom, loc='upper right',
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an anchored inset axes by scaling a parent axes. For usage, also see
:doc:`the examples </gallery/axes_grid1/inset_locator_demo2>`.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
zoom : float
Scaling factor of the data axes. *zoom* > 1 will enlargen the
coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
coordinates (i.e., "zoomed out").
loc : int or str, default: 'upper right'
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
Bbox that the inset axes will be anchored to. If None,
*parent_axes.bbox* is used. If a tuple, can be either
[left, bottom, width, height], or [left, bottom].
If the kwargs *width* and/or *height* are specified in relative units,
the 2-tuple [left, bottom] cannot be used. Note that
the units of the bounding box are determined through the transform
in use. When using *bbox_to_anchor* it almost always makes sense to
also specify a *bbox_transform*. This might often be the axes transform
*parent_axes.transAxes*.
bbox_transform : `matplotlib.transforms.Transform`, optional
Transformation for the bbox that contains the inset axes.
If None, a `.transforms.IdentityTransform` is used (i.e. pixel
coordinates). This is useful when not providing any argument to
*bbox_to_anchor*. When using *bbox_to_anchor* it almost always makes
sense to also specify a *bbox_transform*. This might often be the
axes transform *parent_axes.transAxes*. Inversely, when specifying
the axes- or figure-transform here, be aware that not specifying
*bbox_to_anchor* will use *parent_axes.bbox*, the units of which are
in display (pixel) coordinates.
axes_class : `matplotlib.axes.Axes` type, optional
If specified, the inset axes created will be created with this class's
constructor.
axes_kwargs : dict, optional
Keyworded arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes_kwdoc)s
borderpad : float, default: 0.5
Padding between inset axes and the bbox_to_anchor.
The units are axes font size, i.e. for a default font size of 10 points
*borderpad = 0.5* is equivalent to a padding of 5 points.
Returns
-------
inset_axes : *axes_class*
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
@docstring.dedent_interpd
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
"""
Draw a box to mark the location of an area represented by an inset axes.
This function draws a box in *parent_axes* at the bounding box of
*inset_axes*, and shows a connection with the inset axes by drawing lines
at the corners, giving a "zoomed in" effect.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes which contains the area of the inset axes.
inset_axes : `matplotlib.axes.Axes`
The inset axes.
loc1, loc2 : {1, 2, 3, 4}
Corners to use for connecting the inset axes and the area in the
parent axes.
**kwargs
Patch properties for the lines and box drawn:
%(Patch_kwdoc)s
Returns
-------
pp : `matplotlib.patches.Patch`
The patch drawn to represent the area of the inset axes.
p1, p2 : `matplotlib.patches.Patch`
The patches connecting two corners of the inset axes and its area.
"""
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
if 'fill' in kwargs:
pp = BboxPatch(rect, **kwargs)
else:
fill = bool({'fc', 'facecolor', 'color'}.intersection(kwargs))
pp = BboxPatch(rect, fill=fill, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2
| [
"82611064+python019@users.noreply.github.com"
] | 82611064+python019@users.noreply.github.com |
8577bd44b162241340efce5fcd8fae463442057c | 049c2f8b8979fb7738aecde3fb89234e3554c7fe | /psp class/6.b)Write a program to perform addition of two square matrices.py | 3e2a4c38d9a6bce8f54137bd9879a469913f0700 | [] | no_license | nlakshmi-29/pspclass | 9131fc147a47b77eb22e7bdece154869038ba0c2 | 4cf038a0783f1fe9718669b66b9f4115b7fd3fa6 | refs/heads/main | 2023-07-06T05:11:11.364144 | 2021-08-07T17:52:34 | 2021-08-07T17:52:34 | 392,192,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | X = [[1,2,3],
[4 ,5,6],
[7 ,8,9]]
Y = [[9,8,7],
[6,5,4],
[3,2,1]]
result = [[0,0,0],
[0,0,0],
[0,0,0]]
# iterate through rows
for i in range(len(X)):
# iterate through columns
for j in range(len(X[0])):
result[i][j] = X[i][j] + Y[i][j]
for r in result:
print(r)
| [
"noreply@github.com"
] | nlakshmi-29.noreply@github.com |
72c5a7ef24d9780bf457ee18843bfa0aa5b2843a | 9c1eb98f706fb286e9e6d450c5d80f321bdaa2ca | /authors/apps/articles/tests/test_tags.py | 717f5746a3f87050d43d23139a66266039dfca31 | [
"MIT",
"BSD-3-Clause"
] | permissive | Tittoh/blogAPI | b9d75a36987244e590df5cbc68fbf98c81e54996 | 19f5550f97905ee4a97574cab799d42a0471f12b | refs/heads/develop | 2023-08-04T16:53:34.420960 | 2023-08-03T19:34:16 | 2023-08-03T19:34:16 | 223,628,636 | 1 | 1 | MIT | 2023-09-03T07:41:44 | 2019-11-23T17:37:38 | Python | UTF-8 | Python | false | false | 6,777 | py | import json
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from authors.apps.authentication.models import User
from authors.apps.authentication.tests.utils import TEST_USER
from django.core import mail
from authors.apps.authentication.views import VerifyAccount
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from authors.apps.authentication.utils import generate_token
from rest_framework.test import force_authenticate
from rest_framework.test import APIRequestFactory
from authors.apps.articles.models import Article
user = {
"user": {
"username": "test",
"email": "info@test.co",
"password": "Test123."
}
}
class TestTags(APITestCase):
"""
This class defines the test suite for the tags of an article.
"""
def setUp(self):
"""Define the test client and other test variables."""
self.article1 = {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["django-rest", "python"]
}
}
self.bad_tag_list = {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": "django-rest, python"
}
}
self.tag_tuple = {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ("django-rest, python")
}
}
self.user = {
"user": {
"username": "test",
"email": "info@test.co",
"password": "Test123."
}
}
def login_user(self, user=user):
"""
login user
"""
response = self.client.post(
reverse("authentication:login"),
user,
format='json')
response.render()
user = json.loads(response.content)
return user
def create_a_user(self, username='test', email='info@test.co',
password='Test123.'):
"""
Create a test user
"""
user = User.objects.create_user(username, email, password)
user.save()
return user
def create_article(self):
"""
Create a test article
"""
tags = ["django-rest", "python"]
user = User.objects.get()
article = Article.objects.create(
title="How to train your dragon",
description="Ever wonder how?",
body="You have to believe",
tagList=tags,
author=user.profile)
article.save()
return article
def verify_user(self, user):
"""Verify user"""
token = generate_token.make_token(user)
uid = urlsafe_base64_encode(force_bytes(user.pk))
request = APIRequestFactory().get(
reverse("authentication:verify", args=[uid, token]))
verify_account = VerifyAccount.as_view()
verify_account(request, uidb64=uid, token=token)
return user
#Test cases
def test_tagList_added(self):
"""Test a tagList is added when an article is created"""
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
response = self.client.post('/api/articles/',
self.article1,
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertIn("django-rest", response.content.decode()),
self.assertIn("python", response.content.decode())
def test_tagList_returned(self):
"""Test api can return a taglist with an article"""
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
self.client.post('/api/articles/',
self.article1,
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
response = self.client.get('/api/articles/',
format='json'
)
self.assertIn("django-rest", response.content.decode()),
self.assertIn("python", response.content.decode())
def test_get_tagList(self):
"""
Test api can get a tagList
"""
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
self.client.post('/api/articles/',
self.article1,
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
response = self.client.get('/api/tags/',
format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_tagList_object(self):
"""
Test a tagList cannot be a string
"""
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
response = self.client.post('/api/articles/',
self.bad_tag_list,
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertNotIn("django-rest", response.content.decode()),
self.assertNotIn("python", response.content.decode())
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_tagList_data_structure(self):
"""
Test a tagList cannot be a tuple
"""
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
response = self.client.post('/api/articles/',
self.tag_tuple,
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| [
"tittohb@gmail.com"
] | tittohb@gmail.com |
0c8356f076217c8333f9d2e64c83f2239d4fb15e | c7b03a2e9b590d343a7719a7ac107199b2fe7b74 | /modules/ravestate_wildtalk/parlai_backend.py | 46e056d30273ae893634accce81c0b4acc172423 | [
"BSD-3-Clause"
] | permissive | Roboy/ravestate | f85039955f477102f34f2c7d384087d9e212509c | e3d08e4401e9f54100b2cd918ef37286099c641b | refs/heads/master | 2022-11-08T11:20:24.621482 | 2022-10-25T08:14:25 | 2022-10-25T08:14:25 | 151,973,578 | 29 | 8 | BSD-3-Clause | 2022-10-25T08:14:27 | 2018-10-07T18:42:46 | Python | UTF-8 | Python | false | false | 407 | py | import re
from typing import Dict
from roboy_parlai import wildtalk
from reggol import get_logger
logger = get_logger(__name__)
fix_spaces = re.compile(r'\s*([?!.,]+(?:\s+[?!.,]+)*)\s*')
class Parlai_Responder:
def process(self, prompt: str, model_options: Dict[str, str]):
result = wildtalk(prompt)
return fix_spaces.sub(lambda x: "{} ".format(x.group(1).replace(" ", "")), result)
| [
"toseban@gmail.com"
] | toseban@gmail.com |
acf3d6f042cf28089810655fa79844f3911bf479 | 8701434a837802d6cecaafa660a9a655a362bdfe | /gui/gui.py | 3c08bbf48e1e60a8d36c0981166839acfb70c862 | [
"MIT"
] | permissive | Mathieu-R/tchernobyl-simulation | 668fc0aee1e5abb3015a734e6d837c4027eb8726 | 9861a43b4de75ef9966982b4f458b4e091bf3e92 | refs/heads/master | 2022-12-05T09:08:19.108403 | 2019-12-20T13:24:35 | 2019-12-20T13:24:35 | 224,417,224 | 0 | 0 | MIT | 2022-11-22T04:55:09 | 2019-11-27T11:40:14 | Python | UTF-8 | Python | false | false | 6,928 | py | import numpy as np
import tkinter as tk
from ttkthemes import themed_tk
from tkinter import ttk
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk
from edo_solver.edo import neutrons_flow_edo
from edo_solver.neutrons_flow import NeutronsFlow
from edo_solver.plot_animation import PlotAnimation
from utils import day_to_seconds, hour_to_seconds
from constants import FLOW_START, TIME_INTERVAL, TEXT_FONT, SIGMA_B_MIN, SIGMA_B_MAX, SIGMA_B_STEP
class GraphicInterface():
    """Tkinter front-end for the reactor (neutron flow) simulation.

    Builds the main window — a plot notebook on the left and a parameter
    panel on the right — wires the start/stop, pause, control-bar slider and
    quit widgets, and drives a NeutronsFlow ODE simulation rendered through
    PlotAnimation. Instantiating the class enters the Tk main loop and
    blocks until the window is closed.
    """

    def __init__(self):
        """Build the whole GUI, then enter ``mainloop`` (blocks)."""
        self.root = themed_tk.ThemedTk()
        self.root.minsize(800, 600)
        self.root.title("Simulation du réacteur d'une centrale nucléaire")
        self.root.rowconfigure(0, weight=1)
        self.root.columnconfigure(0, weight=1)
        main_frame = ttk.Frame(self.root)
        main_frame.grid(row=0, column=0, sticky="nsew", padx=10, pady=10)
        main_frame.rowconfigure(0, weight=1)
        main_frame.columnconfigure(0, weight=1)
        # Holds the notebook (tabbed plot area).
        notebook_frame = ttk.Frame(main_frame, borderwidth=8)
        notebook_frame.grid(row=0, column=0, sticky="nsew")
        notebook_frame.rowconfigure(0, weight=1)
        notebook_frame.columnconfigure(0, weight=1)
        # Holds the plots, one per tab.
        notebook = ttk.Notebook(notebook_frame)
        notebook.grid(row=0, column=0, sticky="nsew")
        notebook.rowconfigure(0, weight=1)
        # Tab containing the neutron-flow plot.
        neutrons_flow_frame = ttk.Frame(notebook)
        notebook.add(neutrons_flow_frame, text="Flux de neutrons")
        # Instantiate the plotting class (a matplotlib canvas embedded in Tk).
        self.neutrons_flow_plot = PlotAnimation(tk_root=neutrons_flow_frame)
        toolbar1 = NavigationToolbar2Tk(self.neutrons_flow_plot, neutrons_flow_frame)
        toolbar1.update()
        parameters_frame = ttk.LabelFrame(main_frame, text="Paramètres")
        parameters_frame.grid(row=0, column=1, sticky="nsew")
        parameters_frame.rowconfigure(0, weight=1)
        parameters_frame.rowconfigure(1, weight=5)
        parameters_frame.rowconfigure(2, weight=5)
        parameters_frame.rowconfigure(3, weight=5)
        parameters_frame.rowconfigure(4, weight=5)
        parameters_frame.rowconfigure(5, weight=5)
        parameters_frame.columnconfigure(0, weight=1)
        # User-editable simulation inputs (kept as strings; parsed on start).
        self.field_I0 = tk.StringVar(value="1.0")
        self.field_X0 = tk.StringVar(value="2e15")
        self.field_flow0 = tk.StringVar(value=f"{FLOW_START}")
        self.field_time_interval = tk.StringVar(value="10")
        self.field_stop = tk.StringVar(value="100")
        label_I0 = ttk.Label(parameters_frame, text="Iode initial")
        entry_I0 = ttk.Entry(parameters_frame, textvariable=self.field_I0)
        label_X0 = ttk.Label(parameters_frame, text="Xénon initial")
        entry_X0 = ttk.Entry(parameters_frame, textvariable=self.field_X0)
        label_flow0 = ttk.Label(parameters_frame, text="Flux initial")
        entry_flow0 = ttk.Entry(parameters_frame, textvariable=self.field_flow0)
        label_time_interval = ttk.Label(parameters_frame, text="Pas de temps (s)")
        entry_time_interval = ttk.Entry(parameters_frame, textvariable=self.field_time_interval)
        label_stop = ttk.Label(parameters_frame, text="Durée de la simulation (h)")
        entry_stop = ttk.Entry(parameters_frame, textvariable=self.field_stop)
        label_I0.grid(row=1, column=0, sticky='new')
        label_X0.grid(row=1, column=0, sticky='ew')
        label_flow0.grid(row=1, column=0, sticky='sew')
        label_time_interval.grid(row=2, column=0, sticky='new')
        label_stop.grid(row=2, column=0, sticky='ew')
        entry_I0.grid(row=1, column=1, sticky='new')
        entry_X0.grid(row=1, column=1, sticky='ew')
        entry_flow0.grid(row=1, column=1, sticky='sew')
        entry_time_interval.grid(row=2, column=1, sticky='new')
        entry_stop.grid(row=2, column=1, sticky='ew')
        self.start_button = ttk.Button(parameters_frame, text="Démarrer la simulation", command=self.toggle_start_stop)
        self.start_button.grid(row=4, column=0, columnspan=2, sticky="new")
        self.pause_button = ttk.Button(parameters_frame, text="Pause", state="disabled", command=self.toggle_play_pause)
        self.pause_button.grid(row=4, column=0, columnspan=2, sticky="ew")
        # Slider controlling the absorption cross-section of the control bars.
        scalevar = tk.DoubleVar()
        self.slider_title = ttk.Label(parameters_frame, text="Barres de contrôles", font=("Helvetica", 12))
        self.slider_title.grid(row=5, column=0, sticky="new")
        self.slider_title_variable = ttk.Label(parameters_frame, textvariable=scalevar, font=("Helvetica", 9))
        self.slider_title_variable.grid(row=5, column=1, sticky="new")
        self.slider_control_bars = ttk.Scale(parameters_frame, variable=scalevar, value=SIGMA_B_MIN, from_=SIGMA_B_MIN, to_=SIGMA_B_MAX, length=200, orient=tk.HORIZONTAL, command=self.update_sigma_b)
        self.slider_control_bars.grid(row=5, column=0, columnspan=2, sticky="ew")
        quit_button = ttk.Button(parameters_frame, text="Quitter", command=self.quit)
        quit_button.grid(row=5, column=0, columnspan=2, sticky="sew")
        # Resize buttons/labels: apply the shared font and uniform padding.
        for child in parameters_frame.winfo_children():
            if isinstance(child, ttk.Label):
                child.config(font=TEXT_FONT)
                child.grid_configure(padx=5, pady=5)
        # True while a simulation run is in progress.
        self.started = False
        # True while a running simulation is paused.
        self.paused = False
        # Current NeutronsFlow ODE simulation (None until started).
        self.simulation = None
        self.root.mainloop()

    def toggle_start_stop(self):
        """Start a new simulation run, or stop the current one (button callback)."""
        # Starting the simulation.
        if not self.started:
            self.started = True
            self.start_button.config(text="Arrêter la simulation")
            self.pause_button.config(state="normal")
            I0 = float(self.field_I0.get())
            X0 = float(self.field_X0.get())
            flow0 = float(self.field_flow0.get())
            time_step = float(self.field_time_interval.get())
            time_end = hour_to_seconds(int(self.field_stop.get()))
            # Add one extra step because np.arange excludes the stop value.
            full_time_range = np.arange(0, time_end + time_step, time_step)
            FLOW_CI = [I0, X0, flow0] # [I(T_0), X(T_0), PHI[T_0]]
            self.simulation = NeutronsFlow(
                edo=neutrons_flow_edo,
                ci=FLOW_CI,
                full_time_range=full_time_range,
                time_step=time_step
            )
            self.neutrons_flow_plot.animate(self.simulation, time_end)
        elif self.started:
            # Stopping the simulation.
            # NOTE(review): self.paused and the pause button text are not reset
            # here, so a run paused before being stopped leaves stale pause
            # state for the next run — confirm whether this is intended.
            self.started = False
            self.start_button.config(text="Démarrer la simulation")
            self.pause_button.config(state="disabled")
            self.neutrons_flow_plot.stop()

    def toggle_play_pause(self):
        """Pause or resume the running animation (button callback)."""
        if not self.paused:
            self.paused = True
            self.pause_button.config(text="Continuer")
            self.neutrons_flow_plot.toggle(pause=True)
        elif self.paused:
            self.paused = False
            self.pause_button.config(text="Pause")
            self.neutrons_flow_plot.toggle(pause=False)

    def update_sigma_b(self, sigma_b):
        """Slider callback: forward the new control-bar cross-section to the simulation."""
        #self.slider_title.config(text=f"Barres de contrôles {sigma_b}")
        # Ignore slider moves made before a simulation has been started.
        if not self.simulation:
            return
        self.simulation.target_sigma_b = float(sigma_b)

    def quit(self):
        """Leave the main loop and destroy the window (quit-button callback)."""
        self.root.quit()
        self.root.destroy()
"mathieu0709@gmail.com"
] | mathieu0709@gmail.com |
c1200cc0a277ef9bc3bcac3a53cd68c6ad6a3a74 | 4ef22fc4ab1e3f943105260c535b04954c389d0e | /app/api/resources.py | ff249e5c7c12177516afd90b6a1cd6064ba83246 | [] | no_license | lapshinmr/vue-flask | d3cd645367c49132cada7b03a37a60bb189c214e | 03c77b0c750f7935d3784b992c9932f28a2f003a | refs/heads/master | 2022-04-12T07:08:15.948151 | 2020-04-09T18:03:10 | 2020-04-09T18:03:10 | 253,712,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | from datetime import datetime
from flask import request
from flask_restplus import Resource
from .security import require_auth
from . import api_rest
class SecureResource(Resource):
    """ Calls require_auth decorator on all requests """
    # flask-restplus applies each decorator here to every HTTP method handler
    # defined on subclasses, so all endpoints inherit authentication.
    method_decorators = [require_auth]
@api_rest.route('/resource/<string:resource_id>')
class ResourceOne(Resource):
    """ Unsecure Resource Class: Inherit from Resource """

    def get(self, resource_id):
        # Echo the current UTC time; resource_id is accepted but unused.
        return {'timestamp': datetime.utcnow().isoformat()}

    def post(self, resource_id):
        # Echo the request's JSON body back under the 'timestamp' key, 201 Created.
        return {'timestamp': request.json}, 201
@api_rest.route('/secure-resource/<string:resource_id>')
class SecureResourceOne(SecureResource):
    """ Unsecure Resource Class: Inherit from Resource """

    def get(self, resource_id):
        # NOTE(review): docstring says "Unsecure" but SecureResource applies
        # require_auth here — the docstring looks copy-pasted.
        return {'timestamp': datetime.utcnow().isoformat()}
| [
"lapshinmr@gmail.com"
] | lapshinmr@gmail.com |
d1145037916ee9d101add906734b65154a1a8a0e | 17fa8d07e31b45c3774508a93d35deba19937326 | /sewRosy/wsgi.py | fef65b3346a15bd0e4ff509746dd05a6ed565694 | [] | no_license | markgrahamdawson/sewRosy | e810772c473060096af64070cfc98cd5d60dae46 | 9034e3382128457f7e126860cf9fb3cd8bceebe1 | refs/heads/master | 2022-05-28T04:42:51.263039 | 2017-12-15T09:42:49 | 2017-12-15T09:42:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for sewRosy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable;
# setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sewRosy.settings")
# WSGI entry point used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"markgrahamdawson@gmail.com"
] | markgrahamdawson@gmail.com |
28c05a44ba70abe18d6362f2f5149765c73adee1 | 4a4a24bf9521ef659d16fb08403242a77a9b9d77 | /aos_l10n_id/models/localization.py | 697c48b804e8e16763168c1459d8a44355bd4266 | [] | no_license | hassanfadl/Odoo12-1 | 601c4969c9d483590e8481e92ecaf4dddaac3847 | bb057424138f99d0a645d185fbd26648385fbdf7 | refs/heads/main | 2023-07-31T22:59:19.597624 | 2021-10-01T06:35:58 | 2021-10-01T06:35:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,822 | py | ##############################################################################
#
# Copyright (C) 2011 ADSOFT OpenERP Partner Indonesia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import api, fields, models, _
#from openerp import api, fields, models, _
import logging
_logger = logging.getLogger(__name__)
# try:
# import phonenumbers
# except Exception as e:
# _logger.warning(
# 'Import Error for phonenumbers, you will not be able to validate phone number.\n'
# 'Consider Installing phonenumbers or dependencies: https://pypi.python.org/pypi/phonenumbers/7.2.6.')
# raise e
class res_country_state(models.Model):
    """Extend provinces with their list of kabupaten (regencies)."""
    _inherit = "res.country.state"
    #name = fields.Char(string='Province')
    # One2many back-reference: kabupaten records whose state_id is this province.
    kabupaten_line = fields.One2many('res.kabupaten', 'state_id', string='Kabupaten')
class ResKabupaten(models.Model):
    """Kabupaten (regency) records, each belonging to a province."""
    _name = "res.kabupaten"
    _description = "List Kabupaten"

    name = fields.Char(string='Kabupaten')
    state_id = fields.Many2one('res.country.state', string="Province")
    kecamatan_line = fields.One2many('res.kecamatan', 'kabupaten_id', string='Kecamatan')

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Narrow the search to the province passed via ``search_default_province``."""
        province_id = self._context.get('search_default_province')
        if province_id:
            # Extend in place so the caller's domain list sees the filter too.
            args += [('state_id', '=', province_id)]
        return super(ResKabupaten, self)._search(
            args, offset=offset, limit=limit, order=order,
            count=count, access_rights_uid=access_rights_uid)
class ResKecamatan(models.Model):
    """Kecamatan (district) records, linked to a kabupaten and a province."""
    _name = "res.kecamatan"
    _description = "List Kecamatan"

    name = fields.Char(string='Kecamatan')
    state_id = fields.Many2one('res.country.state', string="Province")
    kabupaten_id = fields.Many2one('res.kabupaten', string="Kabupaten")
    kelurahan_line = fields.One2many('res.kelurahan', 'kecamatan_id', string='Kelurahan')

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Apply any ``search_default_*`` context filters to the search domain."""
        context = self._context
        for context_key, field_name in (
                ('search_default_kabupaten', 'kabupaten_id'),
                ('search_default_province', 'state_id')):
            if context.get(context_key):
                # Extend in place so the caller's domain list sees the filter too.
                args += [(field_name, '=', context[context_key])]
        return super(ResKecamatan, self)._search(
            args, offset=offset, limit=limit, order=order,
            count=count, access_rights_uid=access_rights_uid)
class ResKelurahan(models.Model):
    """Kelurahan (village) records, linked up the whole administrative hierarchy."""
    _name = "res.kelurahan"
    _description = "List Kelurahan"

    name = fields.Char(string='Kelurahan')
    state_id = fields.Many2one('res.country.state', string="Province")
    kabupaten_id = fields.Many2one('res.kabupaten', string="Kabupaten")
    kecamatan_id = fields.Many2one('res.kecamatan', string="Kecamatan")
    zip = fields.Char("Kode Post")

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """Apply any ``search_default_*`` context filters to the search domain."""
        context = self._context
        for context_key, field_name in (
                ('search_default_zip', 'zip'),
                ('search_default_kecamatan', 'kecamatan_id'),
                ('search_default_kabupaten', 'kabupaten_id'),
                ('search_default_province', 'state_id')):
            if context.get(context_key):
                # Extend in place so the caller's domain list sees the filter too.
                args += [(field_name, '=', context[context_key])]
        return super(ResKelurahan, self)._search(
            args, offset=offset, limit=limit, order=order,
            count=count, access_rights_uid=access_rights_uid)
class res_race(models.Model):
    """Reference list of RAS/Suku (ethnic groups)."""
    _name = "res.race"
    _description = "List RAS/Suku"
    # Ethnic group name; translatable.
    name = fields.Char(string='RAS', required=True , translate=True)
class res_religion(models.Model):
    """Reference list of religions (Agama)."""
    _name = "res.religion"
    _description = "List Agama"
    # Religion name; translatable.
    name = fields.Char(string='Religion', required=True , translate=True)
| [
"noreply@github.com"
] | hassanfadl.noreply@github.com |
5b052cd8705d0a4fee9dd1e544a4ae6904c4cdd4 | 14448d19d94055266f3d6f08904654110ccf132e | /naveen/settings.py | b53bede9ded527ce45e32056e38d8a93facb2d08 | [] | no_license | vamsisia/naveen | b0c4860dae9560cd774075598009f2294125e8f0 | 2ba9ee2ab5f406f42003ed70fc190be717fd22c4 | refs/heads/main | 2023-05-09T20:17:49.140831 | 2021-06-09T04:18:18 | 2021-06-09T04:18:18 | 375,217,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,250 | py | """
Django settings for naveen project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'django-insecure-)9o2cw!eqw=dh+dunv#a(a81l1st(qzc_e1^%)_w2ol$gof6q5'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True with empty ALLOWED_HOSTS is local-dev only.
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'naveen.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': ["templates"],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'naveen.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"vamsi@Vamsis-MacBook-Air.local"
] | vamsi@Vamsis-MacBook-Air.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.