blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa336fbb3992233b48b4fd7554d8a11104ac743d | 309859bac335fd17a63f60a13df92537d86db7cf | /UserDataHub/_version.py | c194335ecb3634e4d9b75b715cdf2383d25c8c54 | [
"BSD-3-Clause"
] | permissive | darden-data-science/UserDataHub | b2eeb2e8a84f4e2574e6c0232f5b240d9699253e | e6b10a1cacc8f761fbb222596cfc957acafbb1d7 | refs/heads/master | 2022-12-02T02:29:47.168400 | 2020-08-10T15:48:13 | 2020-08-10T15:48:13 | 284,826,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | """userdatahub version info"""
# Copyright (c) Michael Albert.
# Distributed under the terms of the Modified BSD License.
version_info = (
0,
0,
1,
'dev', # comment-out this line for a release
)
__version__ = '.'.join(map(str, version_info[:3]))
if len(version_info) > 3:
__version__ = '%s%s' % (__version__, version_info[3]) | [
"albertmichaelj@gmail.com"
] | albertmichaelj@gmail.com |
6207df885f2ccbdfe41ab6b5dd75ae3a555588d0 | 3e286c0fff22b4082d0c3a8edaffd5c91b806cf4 | /simplemooc/simplemooc/core/urls.py | 4f39e0a7334f44ac736d26103112091378c7ffa5 | [] | no_license | izaguerreiro/simplemooc | 7aa42d0715a8a35ca0f0b1dfba9bf30d538dc065 | d6bb092b035360eb8742aa6e7d5b41271fe96862 | refs/heads/master | 2020-04-12T06:43:37.960136 | 2017-02-06T17:34:18 | 2017-02-06T17:34:18 | 65,295,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from django.conf.urls import url
from simplemooc.core import views
# URL routes for the core app: the site home page and the contact page.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^contato/$', views.contact, name='contact'),
]
| [
"izaguerreiro@gmail.com"
] | izaguerreiro@gmail.com |
d7e9068c7d51c6c6aeb60c3d438446858ec1a0cc | 0952d6ca543090cfbc07b1c88de63d74acc3bd23 | /src/newenv/rebu/marketplace/test_orders.py | 172f22fda483efaeebde251a556793f356da8aeb | [] | no_license | mmw5hy/rebu-ii | 6d502c6ef344d67d673f6e53421d275878dcde25 | bd0c9e55a0561199cd29996b8eff713bbefcd6bf | refs/heads/master | 2020-04-29T05:33:33.140049 | 2019-03-15T20:28:55 | 2019-03-15T20:28:55 | 175,886,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,177 | py | from django.test import TestCase, Client
import urllib.request
import urllib.parse
from django.core.files.uploadedfile import SimpleUploadedFile
import os.path
from marketplace.models import Item, Order, OrderItem, Cart
from accounts.models import Consumer, Producer
class OrderTestHere(TestCase):
    """Integration tests for the /api/orders/ endpoints and the shopping cart.

    setUp builds one consumer, one producer, one item, one order (with an
    OrderItem) and one cart, so every test starts from ids == 1.
    """
    def setUp(self):
        """ Setup method that runs before each test """
        # Consumer account used to exercise the checkout view.
        self.user = Consumer.objects.create(
            first_name='testuser',
            last_name='userLast',
            email='test@email.com',
            address='123 test drive',
            is_producer = False
        )
        self.user.set_password("123")
        self.user.username = "test@email.com"
        self.user.save()
        self.producer = Producer.objects.create(
            first_name='testuser',
            last_name='userLast',
            email='producer@email.com',
            address='123 test drive',
            store_name="Pete's shop",
            active=True,
            is_producer = True
        )
        # The producer model requires image/document uploads; reuse the
        # default food image shipped with the accounts app.
        path = os.path.abspath(os.path.dirname(__file__))
        path = os.path.join(path, '../accounts/static/images/item-images/default_food_image.jpg')
        self.producer.image = SimpleUploadedFile(name='default_food_image.jpg', content=open(path, 'rb').read(), content_type='image/jpeg')
        self.producer.documents = SimpleUploadedFile(name='default_food_image.jpg', content=open(path, 'rb').read(), content_type='image/jpeg')
        self.producer.set_password("123")
        self.producer.username = "producer@email.com"
        self.producer.save()
        self.item = Item.objects.create(
            ingredients='cake, lemon',
            price='5.99',
            description='really great',
            rating=3.3,
            available=True,
            name="Cake",
            producer=self.producer,
            image="hi.png"
        )
        self.item.save()
        self.order = Order.objects.create(
            from_address='From Address',
            to_address='To Address',
            consumer_id=1,
            producer_id=1,
            completed=False,
            price=100
        )
        self.order.save()
        self.order.items.add(self.item.id)
        self.order_item = OrderItem.objects.create(
            count=3,
            item_id = self.item.id)
        self.order_item.save()
        self.order.items.add(self.order_item.id)
        self.order.save()
        self.cart = Cart.objects.create(
            consumer_id=1,
            producer_id=1,
            price=100)
        self.cart.items.add(self.order_item.id)
        self.cart.save()
        # Unauthenticated test client shared by all API tests.
        self.c = Client()
    def test_get_all_orders(self):
        """ Test to get all the orders that currently exist """
        self.response = self.c.get('/api/orders/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.assertEquals(len(self.data['data']), 1)
        self.assertEquals(self.data['data'][0]['from_address'], "From Address")
    def test_get_single_order(self):
        """ Test to get a single order that currently exists """
        self.response = self.c.get('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.assertEquals(len(self.data['data']), 1)
        self.assertEquals(self.data['data'][0]['id'], 1)
        self.assertEquals(self.data['data'][0]['from_address'], "From Address")
    def test_create_order(self):
        """ Test to create an order with all required fields."""
        self.c.post('/api/orders/1/', {
            'from_address': 'From Address',
            'to_address': 'To Address',
            'consumer_id': 1,
            'producer_id': 1,
            'completed': False,
            'price': 100,
            'items': [self.order_item.id]
        })
        self.response = self.c.get('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.assertEquals(self.data['data'][0]['id'], 1)
        self.assertEquals(self.data['data'][0]['from_address'], "From Address")
        self.assertEquals(self.data['data'][0]['to_address'], "To Address")
    def test_edit_single_order(self):
        """ Test to edit an order that currently exists."""
        self.c.post('/api/orders/1/', {
            'from_address': 'From Address2',
            'to_address': 'To Address2',
            'consumer_id': 1,
            'producer_id': 1,
            'completed': False,
            'price': 100,
            'items': [self.order_item.id]
        })
        self.response = self.c.get('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.assertEquals(self.data['data'][0]['id'], 1)
        self.assertEquals(self.data['data'][0]['from_address'], "From Address2")
        self.assertEquals(self.data['data'][0]['to_address'], "To Address2")
    def test_delete_single_order(self):
        """ Test to delete an order that currently exists."""
        self.response = self.c.delete('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.response = self.c.get('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "FAILED")
    def test_shopping_cart(self):
        """ Test for existence of valid shopping cart. """
        cart = Cart.objects.all()
        self.assertEquals(len(cart), 1)
    def test_shopping_cart_checkout(self):
        """ Test checkout page functionality """
        client = Client()
        logged_in = client.login(username="test@email.com", password="123")
        self.assertTrue(logged_in)
        response = client.get('/orders/checkout/2/')
        self.assertEqual(response.status_code, 200)
    def test_edit_single_order_without_all_fields(self):
        """ Test to edit an order that currently exists without correctly specifying all fields."""
        # First post is a valid edit; the second omits to_address and must
        # be rejected, leaving the price at its previous value (100).
        self.c.post('/api/orders/1/', {
            'from_address': 'From Address2',
            'to_address': 'To Address2',
            'consumer_id': 1,
            'producer_id': 1,
            'completed': False,
            'price': 100,
            'items': [self.order_item.id]
        })
        self.c.post('/api/orders/1/', {
            'from_address': 'From Address2',
            'consumer_id': 1,
            'producer_id': 1,
            'completed': False,
            'price': 50,
            'items': [self.order_item.id]
        })
        self.response = self.c.get('/api/orders/1/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "SUCCESS")
        self.assertEquals(self.data['data'][0]['id'], 1)
        self.assertEquals(self.data['data'][0]['price'], '100.00')
    def test_delete_nonexistent_order(self):
        """ Test to delete an order that currently exists."""
        self.response = self.c.delete('/api/orders/2/')
        self.data = self.response.json()
        self.assertEquals(self.data['status'], "FAILED")
| [
"michael.m.white@live.com"
] | michael.m.white@live.com |
caf0a59d6178ca33e5f7bc40df9ef606e7f17b5b | 9195db580697592398e28dab939f5f918fc3bcde | /src/03.Pick_testData.py | 7ffcee32c638f12fd5d52dd1c611d10c1ac8028e | [] | no_license | tychen5/BERT_chinese_LM_processing | 09d7d70229db21cc07e0bb327e1e9886632980ec | c53bd010cbf092966b55f8e8e2c5483f9da7fcf0 | refs/heads/master | 2020-08-31T14:30:37.769990 | 2020-03-09T02:41:41 | 2020-03-09T02:41:41 | 218,711,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | import os
import random
import re
from tqdm import tqdm
from googletrans import Translator
# Root of the (traditional-Chinese) THUCNews corpus; each immediate
# subdirectory is one news category.
root_dir = "../Data/THUCNews_trad/"
cat_dir = next(os.walk(root_dir))[1]  # category directory names
# print(cat_dir)
'''
Goal: random pick 10% file for testing and seperate too long by |||
'''
# Used below to translate Chinese category names into English output folders.
translator = Translator()
def write_file(output, sent, text_id):
    """Write one sentence per line to ``output/text_id``.

    Parameters
    ----------
    output : str
        Directory to write into; created (with parents) if missing.
    sent : list
        List of single-element lists; ``item[0]`` is the line to write.
    text_id : str
        Name of the file to create inside ``output``.
    """
    # makedirs(exist_ok=True) already tolerates an existing directory,
    # so the previous isdir() pre-check was redundant (and racy).
    os.makedirs(output, exist_ok=True)
    # os.path.join avoids the doubled slash the old manual concatenation
    # produced when ``output`` already ended with '/'.
    out_path = os.path.join(output, text_id)
    with open(out_path, 'w', encoding='utf8') as f:
        for item in sent:
            f.write(item[0] + '\n')
max_length = 512 # BERT need to be lower than 512
# For every category: sample ~5% of its files, strip newlines/ideographic
# spaces, chop each document into 512-char windows (pairs of consecutive
# windows are joined with ' ||| ' for BERT-style sentence pairs), and write
# the result under ../Data/Test_rev/<english category name>/.
for cat in tqdm(cat_dir):
    cat_en = translator.translate(cat).text  # English folder name (network call)
    in_dir = root_dir + cat + '/'
    files = next(os.walk(in_dir))[2]
    test_files_num = int(len(files) * 0.05) # take 5% data to test
    # NOTE(review): random.choices samples WITH replacement, so the same
    # file can be picked twice — random.sample may have been intended.
    test_files = random.choices(files, k=test_files_num)
    sentences = []
    for text_id in test_files:
        in_file_path = in_dir + text_id
        # NOTE(review): this file handle is never closed; a with-block
        # would be safer.
        r = open(in_file_path, 'r', encoding='utf-8')
        text = r.read()
        text = re.sub(r'\n', "", text)
        text = re.sub(r'\u3000', "", text)
        length = len(text)
        iters = int(length / max_length) + 1
        for i in range(iters):
            if i % 2 == 1: # if it's even number (end)
                sentences.append([sent + ' ||| ' + text[i * max_length:(i + 1) * max_length]])
            elif i == iters - 1: # if it's odd number end
                sentences.append([text[i * max_length:(i + 1) * max_length]])
            else: # if it's odd number
                sent = text[i * max_length:(i + 1) * max_length]
        out_dir = '../Data/Test_rev/' + cat_en + '/' # one category many files
        write_file(out_dir, sentences, text_id)
        sentences = []
    # sentences.append(['']) # many test documents seperate by \n
    # out_dir = '../Data/Test/' + cat_en + '/' # one category one file
    # write_file(out_dir, sentences)
| [
"leotchen@deloitte.com.tw"
] | leotchen@deloitte.com.tw |
601d8425b00b2f6b3f92bb01f4f55c2f4ce2f092 | 1b410cd110079e046117ae051b6cad32e62f201d | /function_jwt.py | 7cd03a451b1e69ef79884498f420569435b03673 | [] | no_license | NelsonCode/flask-auth-jwt | bd1ae156a11cfd643bd5df2fedebb0edd240c85f | ec55513d4e0dea49cb0271de426938fe4698b3de | refs/heads/master | 2023-08-16T10:38:28.208213 | 2021-09-23T21:37:04 | 2021-09-23T21:37:04 | 402,959,903 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | from jwt import encode, decode
from jwt import exceptions
from os import getenv
from datetime import datetime, timedelta
from flask import jsonify
def expire_date(days: int):
    """Return the current local time advanced by *days* days."""
    return datetime.now() + timedelta(days)
def write_token(data: dict):
    """Create an HS256-signed JWT from *data* with a 2-day expiry.

    The signing key is read from the SECRET environment variable.
    NOTE(review): ``token.encode("UTF-8")`` assumes PyJWT >= 2.0, where
    ``encode()`` returns ``str``; on PyJWT 1.x it returns ``bytes`` and
    this line would raise AttributeError — confirm the pinned version.
    """
    token = encode(payload={**data, "exp": expire_date(2)},
                   key=getenv("SECRET"), algorithm="HS256")
    return token.encode("UTF-8")
def validate_token(token, output=False):
    """Validate a JWT signed with the SECRET environment variable.

    Returns the decoded payload when *output* is True, ``None`` for a valid
    token when *output* is False, or a Flask JSON response with HTTP 401
    when the token is malformed or expired.
    """
    try:
        if output:
            return decode(token, key=getenv("SECRET"), algorithms=["HS256"])
        decode(token, key=getenv("SECRET"), algorithms=["HS256"])
    except exceptions.DecodeError:
        # Malformed or tampered token.
        response = jsonify({"message": "Invalid Token"})
        response.status_code = 401
        return response
    except exceptions.ExpiredSignatureError:
        # Signature is valid but the "exp" claim is in the past.
        response = jsonify({"message": "Token Expired"})
        response.status_code = 401
        return response
| [
"nelsonher019@gmail.com"
] | nelsonher019@gmail.com |
b01528b02aaaeaf6601446f6bcbe39ab03432c27 | ca41157d95d87a9899730637fd2339479ce80088 | /gPhoton/photonpipe/__init__.py | e8bccb42bbd96b09fe8bd03c3656a75895354873 | [
"BSD-3-Clause"
] | permissive | MillionConcepts/gPhoton2 | b5c2b36b68cfcc38b324f371a9677b86e51709df | 0f1b054094bd476b2998e5b32aceb7e0a764ebda | refs/heads/main | 2023-08-21T09:55:24.598275 | 2023-08-11T06:28:12 | 2023-08-11T06:28:12 | 383,023,797 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | """
.. module:: PhotonPipe
:synopsis: A recreation / port of key functionality of the GALEX mission
execute_pipeline to generate calibrated and sky-projected photon-level data from
raw spacecraft and detector telemetry. Generates time-tagged photon
lists given mission-produced -raw6, -scst, and -asprta data.
"""
from .core import execute_photonpipe
| [
"mstclair@millionconcepts.com"
] | mstclair@millionconcepts.com |
247972aaa39a632dc35b5983f933198777e8b5d0 | 6a1f69c2b11a1cfda8a2e63006b0efa721ed8d7b | /scoreboard_backend/const.py | f832e88117df79f44f73716114489f5a435ae415 | [] | no_license | o-o-overflow/dc2021q_scoreboard | 54aa471daf6263225e8c45c71553f2ffb26c22c7 | bb0e0054fec807dc3f6472d0b1fa5ee21f607b92 | refs/heads/main | 2023-04-21T19:44:39.168136 | 2021-05-03T07:39:26 | 2021-05-03T07:39:26 | 356,939,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | ACCESS_TOKEN_DURATION = 3600 # Seconds
# Competition window (presumably Unix timestamps in seconds — confirm).
COMPETITION_END = 1_620_000_000
COMPETITION_START = 1_619_827_200
# Required hex-digest prefix for the registration proof-of-work.
REGISTRATION_PROOF_OF_WORK = "00ff00"
SUBMISSION_DELAY = 30  # Seconds
TIMESTAMP_MAX_DELTA = 600  # Seconds
# Required hex-digest prefix for the token proof-of-work.
TOKEN_PROOF_OF_WORK = "f00f"
| [
"bbzbryce@gmail.com"
] | bbzbryce@gmail.com |
b0ae31b9df9b8ae5224d7d345f53998f23a3adec | b171eadf42c03ae0e013599e43ec815db0dfce3c | /repository/mongo.py | 81ade7f414235ae0bc742c1d64faad0d1db12e18 | [] | no_license | HarikrishnanMidhun77/Flask-Server | b55a64c4541ae89e7e1fea11fe8c4af6d453d0d2 | 7ec26ee24df0151e2589ee7b7540563b1880a2c5 | refs/heads/main | 2023-08-17T12:48:54.095680 | 2021-10-02T16:04:47 | 2021-10-02T16:04:47 | 412,700,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import os
from pymongo import MongoClient
COLLECTION_NAME = 'twitter'
class MongoRepository(object):
    """CRUD wrapper around the ``twitter`` collection of a MongoDB database."""

    def __init__(self, mongo_url=None):
        """Connect to MongoDB.

        The connection string may be passed directly, or supplied via the
        MONGO_URL environment variable; the original hard-coded URL remains
        the last-resort default for backward compatibility.
        NOTE(review): credentials should not live in source control —
        prefer setting MONGO_URL in the environment and rotating this one.
        """
        if mongo_url is None:
            mongo_url = os.environ.get(
                'MONGO_URL',
                'mongodb+srv://harikrishnan_midhun2:DM_pswd@cluster0.iwigi.mongodb.net/myFirstDatabase?retryWrites=true&w=majority'
            )
        self.db = MongoClient(mongo_url).twitter

    def find_all(self, selector):
        """Return a cursor over every document matching *selector*."""
        return self.db.twitter.find(selector)

    def find(self, selector):
        """Return the first document matching *selector*, or ``None``."""
        return self.db.twitter.find_one(selector)

    def create(self, kudo):
        """Insert *kudo* as a new document and return the insert result."""
        return self.db.twitter.insert_one(kudo)

    def update(self, selector, kudo):
        """Replace the document matching *selector* with *kudo*; return the modified count."""
        return self.db.twitter.replace_one(selector, kudo).modified_count
def delete(self, selector):
return self.db.twitter.delete_one(selector).deleted_count | [
"harikrishnanmidhun77@gmail.com"
] | harikrishnanmidhun77@gmail.com |
8fc483eab2ef6e1184e3fb8c2433373ecc4457e8 | eefbc723d9f354adf96cb85ff1639dc5f7799086 | /Configuration/python/samples/dilepton/sync_ttjets.py | e03d6211dd6a0939119b888a0fad959d0e47bbe8 | [] | no_license | kovalch/TopAnalysis | e4bb2c80be61734c56c124522ecf1719a801bf19 | b7c9bb5268e0680cfbe1e0366e1811466e7846a4 | HEAD | 2016-09-05T10:15:16.810334 | 2015-07-23T13:27:19 | 2015-07-23T13:27:19 | 39,569,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )  # -1 = process all events
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# Local scratch copy of the Summer11 TTJets madgraph sample; the original
# /store DBS path is kept (commented) below for reference.
readFiles.extend( [
       'file:///scratch/hh/current/cms/user/wbehrenh/FEEE3638-F297-E011-AAF8-00304867BEC0.root'
   #'/store/mc/Summer11/TTJets_TuneZ2_7TeV-madgraph-tauola/AODSIM/PU_S4_START42_V11-v1/0000/FEEE3638-F297-E011-AAF8-00304867BEC0.root'
       ])
| [
""
] | |
18d4c0674386cbedcfa039b92e7862b2384b688b | 5b896786df43baeac4a181f454dd7cdf1c236357 | /trader.py | 86fcf1cc5de73142ff6bedd92546757c4c1b5717 | [
"Unlicense"
] | permissive | tarasbob/AugurTrader | 88f57f1b10f3b622575eaba3e464f782ee2155b1 | 8ff801468a21ba686ef3dc0d019db1c0c6dee797 | refs/heads/master | 2020-04-24T06:46:35.878504 | 2019-02-21T02:55:20 | 2019-02-21T02:57:52 | 171,777,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | import json
import requests
class ContractConnector(object):
    """Thin JSON-RPC client for read-only calls against an Ethereum node."""

    def __init__(self, contract_addr: str, client_addr='http://localhost:8545'):
        # Addresses are stored verbatim; the "0x" prefix is added per request.
        self.contract_addr = contract_addr
        self.client_addr = client_addr

    def send_call(self, data: str):
        '''Make a call without sending a transaction. Read from the blockchain.'''
        call_params = [
            {'to': "0x" + self.contract_addr, 'data': "0x" + data},
            'latest',
        ]
        reply = self._send_request('eth_call', call_params)
        return reply["result"]

    def _send_request(self, method, params):
        # Standard JSON-RPC 2.0 envelope posted to the client endpoint.
        body = json.dumps({'jsonrpc': '2.0', 'method': method, 'params': params, 'id': 1})
        response = requests.post(
            self.client_addr,
            data=body,
            headers={'content-type': 'application/json'},
        )
        return response.json()
| [
"t.bobrovytsky@gmail.com"
] | t.bobrovytsky@gmail.com |
caf56a5c83f320979bb653ba7bc9b3700d182468 | 712165fee2593659ff9d7df5dc367363628b906b | /6_8.py | 5a8b3f5b0476124b2d35a6b624fd45b74b3364a8 | [] | no_license | theharshbhatia/Python-for-software-design-book-solutions | 75e99d062d30e593c9278181b3aaf43be121afdf | 76ca11d7c8e5344084303ec19f47d331c56d7395 | refs/heads/master | 2016-09-10T00:47:28.313129 | 2014-08-05T19:34:02 | 2014-08-05T19:34:02 | 10,083,849 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def gcd(a,b):
if b==0:
print("gcd is equal to",a)
else:
r=a%b
return gcd(b,r)
gcd(25,125)
| [
"horrorbyharsh@gmail.com"
] | horrorbyharsh@gmail.com |
96c7caaa1e26db3b0e0ea72463ddaa0e92e37523 | c015ab0242ebf0bb8f7f2c96a97cfe640389840e | /first_project/settings.py | 3690a3168579e6417a31234511aacc7f11d14b1d | [] | no_license | Mabdurahman68/djangolab3 | 5f7fd9f6798db700b9343c1335e4b4909e42a65f | 086fae991e34b7d3be01ea9afd600bbd7015e277 | refs/heads/master | 2023-04-26T14:26:47.914435 | 2021-05-18T09:06:45 | 2021-05-18T09:06:45 | 368,464,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,442 | py | """
Django settings for first_project project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'django-insecure-#l9^x#2zb2027gid_01t!t^4eyg%5#zvvi#!&&@8jw3(o^yl2p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domains this site may serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'osMansoura'  # local project app
]
# Request/response processing pipeline (Django's default stack).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'first_project.urls'
# Template engine: project-level templates/ directory plus per-app
# templates (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'first_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): MySQL root with an empty password is development-only;
# move credentials to the environment for any shared deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'first_app',
        'HOST': 'localhost',
        'PORT': '3306',
        'USER': 'root',
        'PASSWORD': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# Fix: STATIC_URL is a URL prefix, not a filesystem path.  The previous
# os.path.join(BASE_DIR, '/osMansoura/static/') silently discarded BASE_DIR
# (os.path.join restarts at an absolute second argument) and was
# OS-dependent; this literal is the exact value it produced on POSIX.
STATIC_URL = '/osMansoura/static/'
#STATIC_URL= "/static/"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"muhammed.abdurahman93@gmail.com"
] | muhammed.abdurahman93@gmail.com |
a7fca10c09a92f5ccfadb71b8801deab42b19be1 | b4b130345cc4751d0435cdd380f109248c316fd1 | /hw3/hw3_4.py | 84cabd9f60b3b8259fba4d352d8e5cae6c728dd9 | [] | no_license | botantantan/pangkui | 81826c3abf47c489eed63c6914a37018cdb562bb | 31d661cead3b949b78da1334cd68ec1ad82d179b | refs/heads/master | 2023-02-15T23:15:09.994401 | 2021-01-13T12:31:33 | 2021-01-13T12:31:33 | 324,384,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | n = int(input())
# Accumulate the alternating series 1 - 2/3 + 3/5 - 4/7 + ... for n terms,
# keeping the exact term order of the original so the float result (and
# its 3-decimal rounding) is unchanged.
total = 1.0
a = 2.0  # numerator of the next term
b = 3.0  # denominator of the next term
for i in range(n - 1):
    sign = -1 if i % 2 == 0 else 1
    total = total + (sign * a / b)
    a = a + 1
    b = b + 2
total = round(total, 3)
print(total) | [
"fritz_gerald@live.com"
] | fritz_gerald@live.com |
0e204dc36eebf084d9bf69bd3a6db0a9dc545a26 | 17436b5dd6807214ea59c315c476a5edb19a7241 | /Python (Machine learning)/Ohjelmoinnin alkeet (Ohjelmointi 1)/Kierros 5/ListaPaluuarvona.py | 8a09da2eac0c8b17f1cdb0dcbbe4a4b1f7426363 | [] | no_license | ARuhala/Old-and-unclean-school-projects | a9a3729b40bf73fa4935cbb58d3c26e500703be3 | 24835bee48ebf0f862bc2bcf7b34cd8496f39f8d | refs/heads/master | 2022-12-03T20:59:23.966829 | 2020-08-24T15:39:42 | 2020-08-24T15:39:42 | 287,828,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | def listaaja(montakolukua):
    # Read 'montakolukua' integers from stdin (one per line) and return
    # them as a list.
    lista=[]
    for i in range (montakolukua):
        arvo=int(input())
        lista.append(arvo)
    return lista
def main():
    """Read numbers from the user and report how often a chosen value occurs."""
    how_many = int(input("Kuinka monta lukua haluat käsitellä: "))
    print("Syötä {:1d} kpl lukuja:".format(how_many))
    numbers = listaaja(how_many)
    needle = int(input("Syötä etsittävä luku: "))
    occurrences = numbers.count(needle)
    if occurrences:
        print("{:1d} esiintyy syöttämiesi lukujen joukossa {:1d} kertaa.".format(needle, occurrences))
    else:
        print("{:1d} ei esiinny syöttämiesi lukujen joukossa.".format(needle))
main() | [
"antti.ruhala@gmail.com"
] | antti.ruhala@gmail.com |
72904d25d563858df7c4a40a81f197f5d524e539 | 548183e30e0fa796ef12f5eeb5cd8335619b0f87 | /VehicleInspection/apps/account/serializers.py | ae0e273ea0fdf04f058c456d63d82d0433ee97f7 | [
"Apache-2.0"
] | permissive | iyaoqiujie/VehicleInspection | 53e6f0a4e54fabe804ae3b1615fe62286a0498db | 63ed77eca308c6f5e6cfb63dd57ff06bb6c2ae08 | refs/heads/master | 2022-12-25T21:11:05.664416 | 2019-07-10T07:28:13 | 2019-07-10T07:28:13 | 194,578,543 | 0 | 0 | Apache-2.0 | 2022-12-08T05:18:03 | 2019-07-01T01:17:19 | Vue | UTF-8 | Python | false | false | 6,000 | py | # -*- coding: utf-8 -*-
# Author:Qiujie Yao
# Email: yaoqiujie@gscopetech.com
# @Time: 2019-04-16 08:48
import re
from datetime import datetime, timedelta
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from django.contrib.auth import get_user_model
from django.utils import timezone
from VehicleInspection.settings import REGEX_MOBILE
from .models import VerifyCode
import logging
User = get_user_model()
myLogger = logging.getLogger('insp.account')
class SmsSerializer(serializers.Serializer):
    mobile = serializers.CharField(max_length=16)

    def validate_mobile(self, value):
        """
        Validate the mobile number (the method name must be validate_ + field name).
        """
        # Is the mobile number already registered?
        if User.objects.filter(mobile=value).count():
            raise serializers.ValidationError('用户已经存在')
        # Is the mobile number well-formed?
        if not re.match(REGEX_MOBILE, value):
            raise serializers.ValidationError('手机号码非法')
        # Rate-limit verification-code sending.
        # NOTE(review): datetime.now() is naive while VerifyCode.created may
        # be timezone-aware (timezone.now() is used elsewhere) — confirm.
        two_minutes_ago = datetime.now() - timedelta(hours=0, minutes=2, seconds=0)
        # A record created after this cutoff means the last send was less
        # than 2 minutes ago.
        if VerifyCode.objects.filter(created__gt=two_minutes_ago, mobile=value).count():
            raise serializers.ValidationError('距离上一次发送未超过120秒')
        return value
class UserRegSerializer(serializers.ModelSerializer):
    """Self-service registration: username + password + mobile + SMS code."""
    username = serializers.CharField(label='用户名', help_text='请输入用户名', required=True,
                                     validators=[UniqueValidator(queryset=User.objects.all(), message='用户已经存在')])
    password = serializers.CharField(label='密码', help_text='密码', write_only=True, style={'input_type': 'password'})
    mobile = serializers.CharField(label='手机号码', help_text='手机号码', required=True, write_only=True,)
    smscode = serializers.CharField(label='验证码', required=True, write_only=True, max_length=4, min_length=4,
                                    error_messages={
                                        'blank': '请输入验证码',
                                        'required': '请输入验证码',
                                        'max_length': '验证码格式错误',
                                        'min_length': '验证码格式错误'
                                    }, help_text='验证码')
    code = serializers.IntegerField(default=20000, read_only=True)

    def validate_smscode(self, code):
        # get vs filter: get raises in two cases — multiple matches, or
        # no match at all.
        # try:
        #     verify_records = VerifyCode.objects.get(mobile=self.initial_data['username'], code=code)
        # except VerifyCode.DoesNotExist as e:
        #     pass
        # except VerifyCode.MultipleObjectsReturned as e:
        #     pass
        # Look up codes for this mobile; values posted from the front end
        # live in initial_data.  Order newest first.
        verify_records = VerifyCode.objects.filter(mobile=self.initial_data['mobile']).order_by('-created')
        if verify_records:
            # Take the most recent record.
            last_record = verify_records[0]
            # Codes are valid for five minutes.
            five_minutes_ago = timezone.now() - timedelta(hours=0, minutes=5, seconds=0)
            if five_minutes_ago > last_record.created:
                raise serializers.ValidationError('验证码过期')
            if last_record.code != code:
                raise serializers.ValidationError('验证码错误')
        else:
            raise serializers.ValidationError('验证码错误')

    # A validator without a field name runs across all fields; attrs is the
    # combined dict returned after per-field validation.
    def validate(self, attrs):
        # smscode is verification-only and not a User model field.
        del attrs['smscode']
        return attrs

    def create(self, validated_data):
        user = User(username=validated_data['username'],
                    mobile=validated_data['mobile'],)
        user.set_password(validated_data['password'])
        user.save()
        return user

    class Meta:
        model = User
        fields = ('username', 'code', 'mobile', 'password', 'smscode')
class UserAddSerializer(serializers.ModelSerializer):
    """
    Admin manually adds a user; the initial password is 123456.
    """
    username = serializers.CharField(label="用户名", help_text="请输入用户名", required=True,
                                     validators=[UniqueValidator(queryset=User.objects.all(), message="用户已经存在")])
    mobile = serializers.CharField(label="手机号码", help_text="手机号码", required=True, write_only=True, )
    code = serializers.IntegerField(default=20000, read_only=True)

    def validate(self, attrs):
        # Log the incoming payload for troubleshooting; no extra checks.
        myLogger.debug(attrs)
        return attrs

    def create(self, validated_data):
        user = User(username=validated_data['username'],
                    mobile=validated_data['mobile'],
                    name=validated_data['name'],
                    email=validated_data['email'],
                    company=validated_data['company'],
                    role=validated_data['role'],
                    is_certificated=validated_data['is_certificated'])
        # Fixed initial password; users are expected to change it later.
        user.set_password('123456')
        user.save()
        return user

    class Meta:
        model = User
        fields = ('code', 'username', 'name', 'mobile', 'email', 'company', 'role', 'is_certificated')
class UserDetailSerializer(serializers.ModelSerializer):
    """
    Serializer for user detail views (read-only username).
    """
    username = serializers.ReadOnlyField()
    code = serializers.IntegerField(default=20000, read_only=True)

    class Meta:
        model = User
        fields = ('code', 'id', 'username', 'mobile', 'email', 'company', 'avatar', 'role',
                  'id_card', 'is_certificated', 'can_order', 'date_joined')
| [
"yaoqiujie@gscopetech.com"
] | yaoqiujie@gscopetech.com |
d8ee0e9c48cd183865b678959c67cdc5a94450b8 | 8040f8e0b48285f1e0544ff8cd47bb1d09533fe4 | /jessica_app.py | 654cd1307c7dade103ee8789bf2856b3a4d0169a | [
"MIT"
] | permissive | Jaimin09/Jessica---A-Virtual-Assistant | da16ce92bf2d3903477142e597f841c5374683ce | 10942588e2d1b69177d388d8808991f8ccbb1343 | refs/heads/master | 2021-12-23T16:59:26.736380 | 2021-12-14T09:44:48 | 2021-12-14T09:44:48 | 154,632,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | from flask import Flask, request, jsonify
import numpy as np
import tensorflow as tf
from keras.models import load_model
from chat_utils import *
from helper_functions import *
import webbrowser
import speech_recognition as sr
import winsound
app = Flask(__name__)
def get_model():
    """Load the trained Keras model and cache it (plus its TF graph) globally."""
    global model, graph
    model = load_model('jessica_model.h5')
    # Keep a handle to the default graph so request handlers can re-enter
    # it later via graph.as_default() when predicting.
    graph = tf.get_default_graph()
    print("* Model Loaded successfully !")
print("* Loading Model ...")
get_model()
# Vocabulary and answer-lookup tables used by the /predict route.
words_vocab = get_vocab('all_words_mine.txt')
ques_data = np.genfromtxt('questions.txt', dtype = 'str', delimiter = '\n', encoding = 'utf8')
_, inv_sent_vocab, _, _ = get_everything_ans_sentences('answers.txt')
m = ques_data.shape[0]  # number of known questions
Tq = 20  # max question length in tokens (predict() rejects > 20 words)
n_s = 128  # presumably the model's recurrent state size — confirm against jessica_model
# Zero initial states passed to model.predict alongside the encoded input.
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
@app.route("/predict", methods = ['POST'])
def predict():
    """Answer a chat message (typed, or spoken when 'data' is empty).

    Expects JSON {"data": <text>}; an empty string triggers microphone
    capture via Google speech recognition.  Returns JSON with the reply
    ('prediction') and the user's message ('my_msg').
    """
    message = request.get_json(force = True)
    data = message['data']
    if data == "":
        # No text supplied: record from the microphone, with audible
        # chimes marking the start and end of capture.
        r = sr.Recognizer()
        winsound.PlaySound('chime-short.wav', winsound.SND_FILENAME)
        with sr.Microphone() as source:
            print("Say something!")
            r.pause_threshold = 0.6
            audio = r.listen(source)
        winsound.PlaySound('chime.wav', winsound.SND_FILENAME)
        print("audio recorded!")
        # NOTE(review): recognize_google runs OUTSIDE the try below, so
        # UnknownValueError/RequestError from it are not actually caught.
        msg_data = remove_profanity(r.recognize_google(audio).lower())
        try:
            print("You said: " + msg_data)
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
        my_msg = msg_data
    else:
        my_msg = data
        msg_data = data
    # Clean and normalize the message before encoding it for the model.
    data = remove_profanity(msg_data)
    data = filter_zero(data)
    pred_output = check_post(data)  # canned reply, if one matches
    data = filter_three(filter_two(filter_one((data))))
    data = remove_unknown(data, words_vocab)
    if not pred_output:
        if len(data.split()) < 21 :
            # Encode as one-hot (1, Tq, vocab) and run the model inside the
            # graph captured at load time.
            data = convert_example_to_indices(data, words_vocab, Tq)
            data = convert_to_one_hot(data, C= len(words_vocab)).reshape(1, Tq, len(words_vocab))
            with graph.as_default():
                prediction = model.predict([data, s0, c0])
            prob = max(max(prediction))
            print(prob)
            if prob < 0.60:
                # Low confidence: fall back to a generic reply.
                pred_output = "Good for you !"
            else:
                prediction = np.argmax(prediction, axis = -1)
                for i in prediction:
                    # Vocabulary entries are either literal strings or
                    # callables producing a dynamic answer.
                    if isinstance(inv_sent_vocab[i], str) == True :
                        pred_output = inv_sent_vocab[i]
                    else :
                        pred_output = inv_sent_vocab[i]()
        else:
            pred_output = "Sorry ! but can you short it down ? Please ."
    response = {
        'prediction' : "Jessica: " + pred_output,
        'my_msg' : "You: "+ my_msg
    }
    return jsonify(response)
if __name__ == '__main__':
    # Flask development server; not intended for production use.
    app.run(debug=True)
| [
"noreply@github.com"
] | Jaimin09.noreply@github.com |
b981510a735fabed209273d3dcb42ef0ee5ae231 | c5780c664692610031823c749337bdccb41f9021 | /main.py | 724638ae162f33d6e82fc077605406467b921deb | [] | no_license | amritesh-dasari/Crypto-Ciphers | eb2e3db5122f1c5c01db9ebcd13da0857a3d1684 | 402a0a2ffa08dec746e5a0a81abf2f1bbc523234 | refs/heads/master | 2020-07-12T03:47:58.834174 | 2019-09-01T09:06:29 | 2019-09-01T09:06:29 | 204,710,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,631 | py | from Additive import AddCipher
from Multiplicative import Multicipher
from Affine import AffineCipher
def Additive():
print "Additive Cipher Main Menu"
print "1. Encrypt"
print "2. Decrypt"
print "3. Return"
ch=input("Enter your choice: ")
if ch==1:
plain=raw_input("Enter the string to be encrypted : ")
key=input("Enter the Encryption key : ")
x=AddCipher()
cipher=x.encrypt(plain,key)
print "Encrypted using ",x.name," Cipher"
print "The encrypted string is : ",cipher
elif ch==2:
cipher=raw_input("Enter the string to be decrypted : ")
key=input("Enter the Decryption key : ")
x=AddCipher()
plain=x.decrypt(cipher,key)
print "Decrypted using ",x.name," Cipher"
print "The decrypted string is : ",plain
else:
print "Returning to Main Menu"
def Multiplicative():
print "Multiplicative Cipher Main Menu"
print "1. Encrypt"
print "2. Decrypt"
print "3. Return"
ch=input("Enter your choice: ")
if ch==1:
plain=raw_input("Enter the string to be encrypted : ")
while True:
key=input("Enter the Encryption key : ")
if key in [1,3,5,7,9,11,15,17,19,21,23,25]:
break
else:
print "Please enter correct key"
x=Multicipher()
cipher=x.encrypt(plain,key)
print "Encrypted using ",x.name," Cipher"
print "The encrypted string is : ",cipher
elif ch==2:
cipher=raw_input("Enter the string to be decrypted : ")
while True:
key=input("Enter the Encryption key : ")
if key in [1,3,5,7,9,11,15,17,19,21,23,25]:
break
else:
print "Please enter correct key"
x=Multicipher()
plain=x.decrypt(cipher,key)
print "Decrypted using ",x.name," Cipher"
print "The decrypted string is : ",plain
else:
print "Returning to Main Menu"
def Affine():
print "Affine Cipher Main Menu"
print "1. Encrypt"
print "2. Decrypt"
print "3. Return"
ch=input("Enter your choice: ")
if ch==1:
plain=raw_input("Enter the string to be encrypted : ")
while True:
key1=input("Enter the Encryption key 1 : ")
if key1 in [1,3,5,7,9,11,15,17,19,21,23,25]:
break
else:
print "Please enter correct key"
key2=input("Enter the Encryption key 2 : ")
x=AffineCipher()
cipher=x.encrypt(plain,key1,key2)
print "Encrypted using ",x.name," Cipher"
print "The encrypted string is : ",cipher
elif ch==2:
cipher=raw_input("Enter the string to be decrypted : ")
while True:
key1=input("Enter the Encryption key 1 : ")
if key1 in [1,3,5,7,9,11,15,17,19,21,23,25]:
break
else:
print "Please enter correct key"
key2=input("Enter the Encryption key 2 : ")
x=AffineCipher()
plain=x.decrypt(cipher,key1,key2)
print "Decrypted using ",x.name," Cipher"
print "The decrypted string is : ",plain
else:
print "Returning to Main Menu"
while True:
print "Main Menu"
print "1. Additive Cipher"
print "2. Multiplicative Cipher"
print "3. Affine Cipher"
print "4. Exit"
ch=input("Enter your choice: ")
if ch==1:
print
Additive()
elif ch==2:
print
Multiplicative()
elif ch==3:
print
Affine()
elif ch==4:
break
print
| [
"dmamritesh@gmail.com"
] | dmamritesh@gmail.com |
ab1ac177801a816c02429ac504896814590c18e9 | e92dbeb603cb0cae2c6c92554b3c385e51abb283 | /blog/migrations/0001_initial.py | e15a370f9bf6d93e96bcbf6aebd99ffd46f64e2d | [] | no_license | kronalf/myawesomeblog | 2a0ad029d0f85264b9afd2b0545318cdb74f41a8 | e0a9aff9e08784d7f34c97b6b514f63594ae14fd | refs/heads/master | 2023-02-23T08:24:40.745364 | 2021-01-30T11:02:09 | 2021-01-30T11:02:09 | 332,160,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # Generated by Django 3.1.5 on 2021-01-25 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('date', models.DateTimeField(auto_now=True)),
('text', models.TextField(max_length=500)),
('image', models.ImageField(upload_to='')),
],
),
]
| [
"kovalev_dg@minudo-home.ru"
] | kovalev_dg@minudo-home.ru |
1209a70cd1039cf7ab6bf3fce31b1a1be8346fac | 87c908f35ae6eb84840f5283ac7177e70b00268b | /chinese.py | d957a8b90145293eaeaca8b253ce5bcc7c85c306 | [] | no_license | jaithehuman/rice_detection | e950bd2d25376d758b60a974c4d6df5abf2298f4 | 1faa1ec2da257033014fe2eb1fd70399b4e00739 | refs/heads/main | 2023-06-05T06:13:56.607629 | 2021-06-25T07:59:05 | 2021-06-25T07:59:05 | 363,875,301 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | import cv2
import sys
# Read
filename = sys.argv[1]
img_rice = cv2.imread(filename)
cv2.imshow('rice', img_rice)
# Grayscale
img_gray = cv2.cvtColor(img_rice, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', img_gray)
# Binarization
ret,thresh1 = cv2.threshold(img_gray, 123, 255, cv2.THRESH_BINARY)
cv2.imshow('thresh', thresh1)
# Corrosion and expansion
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2)) #define rectangular structure element
img_erode = cv2.erode(thresh1, kernel, iterations=1)
cv2.imshow('erode', img_erode)
img_dilated = cv2.dilate(img_erode, kernel)
cv2.imshow('dilate', img_dilated)
# Edge detection
contours, hierarchy = cv2.findContours(img_dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
count = 0
ave_area = 0
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area > 20:
count = count + 1
ave_area = ave_area + area
rect = cv2.boundingRect(contours[i]) #Extract rectangle coordinates
print("number:{} x:{} y:{} area:{}".format(count,rect[0],rect[1], area))#Print coordinates
cv2.rectangle(img_rice,rect,(0,255,0),1)#Draw a rectangle
if area > 150:
count = count + 1
cv2.putText(img_rice,str({count,count-1}), (rect[0], rect[1]), cv2.FONT_HERSHEY_COMPLEX, 0.4, (0, 255, 0), 1) #In the upper left corner of the rice grain Write number
else:
pass
cv2.putText(img_rice,str(count), (rect[0], rect[1]), cv2.FONT_HERSHEY_COMPLEX, 0.4, (0, 255, 0), 1) #Write the number in the upper left corner of the rice grain
ave_area = ave_area / count
# Output
print('The total number is: {}, the average area is: {}'.format(count,ave_area))
cv2.imshow("Contours", img_rice)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"nattarrud@gmail.com"
] | nattarrud@gmail.com |
f1087954cff5fa407500306a5e1e68157ef331a7 | 354f52a263d1ec1b0ec4558f09a87036daf422bd | /PythonExercicios/03-modulos-python/ex019.py | fd32e7f25a35e02589bc5f17153eb13270b49464 | [
"MIT"
] | permissive | mateusmarinho/python3-cursoemvideo | eae35032c8422386383720d49355e4766588fe5e | 706d419865532e156fb80b8a873e18cb90d6e0da | refs/heads/main | 2023-04-09T04:39:31.236773 | 2021-04-21T20:58:32 | 2021-04-21T20:58:32 | 360,300,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | import random
a1 = input('Digite o nome do primeiro aluno: ')
a2 = input('Digite o nome do segundo aluno: ')
a3 = input('Digite o nome do terceiro aluno: ')
a4 = input('Digite o nome do quarto aluno: ')
lista = [a1, a2, a3, a4]
print('O aluno escolhido foi {}.'.format(random.choice(lista)))
| [
"noreply@github.com"
] | mateusmarinho.noreply@github.com |
a4221a26f7a8f15d99820a04fb870d3c580e7c79 | 002f28763ed3e0b2114c1ba950ca0ddbd6be4cdc | /08_Django/day01/Day01/day1/news/views.py | 05147949532d30c137cf3eaa601b9a1ed6874c3d | [] | no_license | rogerbear/tarena_project | e599359b94eece6decc13672c6a920071cb65e4c | d8dc5e84d1a81943e94a72a62e09d44919c617c1 | refs/heads/master | 2020-05-28T00:50:44.248954 | 2019-12-20T07:26:58 | 2019-12-20T07:26:58 | 188,836,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index_views(request):
return HttpResponse('这是news应用中的index视图')
| [
"402100940@qq.com"
] | 402100940@qq.com |
488ddd5c60c724031615fd26d7275674b48d650f | c934e7c27f0e72385218a14b4e2a7e94a747a360 | /google-cloud-sdk/lib/surface/certificate_manager/maps/update.py | 7a2c24ca74ad26f07718becdb48850597811789b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PrateekKhatri/gcloud_cli | 5f74b97494df4f61816026af9460b9c4d8e89431 | 849d09dd7863efecbdf4072a504e1554e119f6ae | refs/heads/master | 2023-03-27T05:53:53.796695 | 2021-03-10T04:08:14 | 2021-03-10T04:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud certificate-manager maps update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.certificate_manager import certificate_maps
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.certificate_manager import flags
from googlecloudsdk.command_lib.certificate_manager import resource_args
from googlecloudsdk.command_lib.certificate_manager import util
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Update(base.UpdateCommand):
"""Update a certificate map.
This command updates existing certificate map.
## EXAMPLES
To update a certificate map with name simple-map, run:
$ {command} simple-map --description="desc" --update-labels="key=value"
"""
@staticmethod
def Args(parser):
resource_args.AddCertificateMapResourceArg(parser, 'to update')
labels_util.AddUpdateLabelsFlags(parser)
flags.AddDescriptionFlagToParser(parser, 'certificate map')
flags.AddAsyncFlagToParser(parser)
def Run(self, args):
client = certificate_maps.CertificateMapClient()
map_ref = args.CONCEPTS.map.Parse()
new_description = None
if args.IsSpecified('description'):
new_description = args.description
labels_update = None
labels_diff = labels_util.Diff.FromUpdateArgs(args)
if labels_diff.MayHaveUpdates():
orig_resource = client.Get(map_ref)
labels_update = labels_diff.Apply(
client.messages.CertificateMap.LabelsValue,
orig_resource.labels).GetOrNone()
if new_description is None and labels_update is None:
raise exceptions.Error('Nothing to update.')
response = client.Patch(
map_ref, labels=labels_update, description=new_description)
response = util.WaitForOperation(response, is_async=args.async_)
log.UpdatedResource(map_ref.Name(), 'certificate map', is_async=args.async_)
return response
| [
"code@bootstraponline.com"
] | code@bootstraponline.com |
bffabba54a0d868677fd9c4efb7ae8979d02983b | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/12145431.py | c4e465ef175e1027d3e54926439b5bf2956f89a2 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/12145431.py generated: Fri, 27 Mar 2015 15:48:08
#
# Event Type: 12145431
#
# ASCII decay Descriptor: [B+ -> K+ (J/psi(1S) -> mu+ mu- {,gamma} {,gamma}) (eta -> pi+ pi- pi0)]cc
#
from Configurables import Generation
Generation().EventType = 12145431
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_JpsietaK,mm,pipipi=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12145431
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
9916fa68f4ceb29a1038f6d031fe37d34505a80f | d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4 | /AtCoder/ABC-E/143probE.py | 70f6181217ad03009e6bb24c889899e531bd2a4f | [] | no_license | wattaihei/ProgrammingContest | 0d34f42f60fa6693e04c933c978527ffaddceda7 | c26de8d42790651aaee56df0956e0b206d1cceb4 | refs/heads/master | 2023-04-22T19:43:43.394907 | 2021-05-02T13:05:21 | 2021-05-02T13:05:21 | 264,400,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | import sys
input = sys.stdin.readline
def main():
N, M, L = map(int, input().split())
INF = 10**13
dis = [[INF for _ in range(N)] for _ in range(N)]
for _ in range(M):
a, b, c = map(int, input().split())
dis[a-1][b-1] = c
dis[b-1][a-1] = c
Q = int(input())
Query = [list(map(int, input().split())) for _ in range(Q)]
for k in range(N):
for i in range(N):
for j in range(N):
dis[i][j] = min(dis[i][j], dis[i][k]+dis[k][j])
movable = [[] for _ in range(N)]
for i in range(N):
for j in range(N):
if i != j and dis[i][j] <= L:
movable[i].append(j)
for s, t in Query:
s, t = s-1, t-1
q = movable[s]
checked = [False]*N
ok = False
for p in q:
if p == t:
ok = True
break
checked[p] = True
checked[s] = True
if ok:
print(0)
continue
c = 0
while q:
c += 1
qq = []
for p in q:
for np in movable[p]:
if np == t:
ok = True
break
if not checked[np]:
qq.append(np)
checked[np] = True
if ok: break
q = qq
if ok:
print(c)
else:
print(-1)
if __name__ == "__main__":
main() | [
"wattaihei.rapyuta@gmail.com"
] | wattaihei.rapyuta@gmail.com |
8a54ba9ed19cf3c922b135fbd4a13dfb29f9d1ff | cef686dc36c2c5a25870d73ce3e2553bcdeeb556 | /Light/settings.py | 40628f122a01903436d92499657d8085603b1196 | [] | no_license | jeezybrick/light_python | 0846d29dad392830a8ad09f52d4da557531de361 | 5a915bdf271ad5f67cf1677cd989cdcee9c1f35d | refs/heads/master | 2021-01-16T17:45:42.433739 | 2015-09-08T08:20:07 | 2015-09-08T08:20:07 | 41,861,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,045 | py | """
Django settings for Light project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')0nbm3mr^h24z-ab7!#f$z6+=^qa_*uc_(l9$&rp5fy@77#o2k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'notes',
'bootstrap3',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'Light.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Light.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'alternative': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django_light',
'USER': 'Ваш логин',
'PASSWORD': 'Ваш пароль',
'HOST': '127.0.0.1',
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'notes/templates'),
)
AUTH_USER_MODEL = 'notes.MyUser'
MEDIA_ROOT = 'notes/static/'
| [
"smooker14@gmail.com"
] | smooker14@gmail.com |
09d531dc3dff4b0261766e7b224783af3d62b201 | c868f65a78563769f09df1c70c7a9fcdb4fd5596 | /chap03/spreadsheet_2/spreadsheet.py | 1918120ba1bf30ab4b2077e992d505054381aab4 | [] | no_license | github-jxm/Gui-Programming-Whit-PyQt4 | 3dfe3f7f98c001de3d44863bcfd6718ed0070cab | d98972ae0597f34dc1d87dcff5929627a1a9cc66 | refs/heads/master | 2020-04-02T16:46:16.797517 | 2016-07-15T17:05:17 | 2016-07-15T17:05:17 | 62,323,480 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,027 | py | #!/usr/bin/env python
#coding=utf8
from PyQt4.QtCore import QChar, QString, pyqtSlot
from PyQt4.QtGui import QWidget,QTableWidget
# from cell import Cell
#include "cell.h"
#include "spreadsheet.h"
class Spreadsheet(QTableWidget):
def __init__(self,parent=None):
super(Spreadsheet, self).__init__(parent)
# 临时
self.setRowCount(3)
self.setColumnCount(2)
self.autoRecalc = True
#
# setItemPrototype(new Cell);
# setSelectionMode(ContiguousSelection);
#
# connect(this, SIGNAL(itemChanged(QTableWidgetItem *)),
# this, SLOT(somethingChanged()));
#
# clear();
def currentLocation(self):
return QChar('A' + self.currentColumn()) + \
QString.number(self.currentRow() + 1)
def currentFormula(self):
return self.formula(self.currentRow(), self.currentColumn());
# QTableWidgetSelectionRange Spreadsheet::selectedRange() const
def selectedRange(self):
# QList<QTableWidgetSelectionRange> ranges = selectedRanges();
ranges = self.selectedRanges()
if (ranges.isEmpty()):
return self.QTableWidgetSelectionRange()
return ranges.first();
def clear(self):
self.setRowCount(0);
self.setColumnCount(0);
# self.setRowCount(RowCount);
# self.setColumnCount(ColumnCount);
# for (int i = 0; i < ColumnCount; ++i) {
# QTableWidgetItem *item = new QTableWidgetItem;
# item->setText(QString(QChar('A' + i)));
# setHorizontalHeaderItem(i, item);
# }
self.setCurrentCell(0, 0);
# bool Spreadsheet::readFile(const QString &fileName)
def readFile(self,fileName):
# QFile file(fileName);
# if (!file.open(QIODevice::ReadOnly)) {
# QMessageBox::warning(this, tr("Spreadsheet"),
# tr("Cannot read file %1:\n%2.")
# .arg(file.fileName())
# .arg(file.errorString()));
# return false;
# }
#
# QDataStream in(&file);
# in.setVersion(QDataStream::Qt_4_3);
#
# quint32 magic;
# in >> magic;
# if (magic != MagicNumber) {
# QMessageBox::warning(this, tr("Spreadsheet"),
# tr("The file is not a Spreadsheet file."));
# return false;
# }
#
# clear();
#
# quint16 row;
# quint16 column;
# QString str;
#
# QApplication::setOverrideCursor(Qt::WaitCursor);
# while (!in.atEnd()) {
# in >> row >> column >> str;
# setFormula(row, column, str);
# }
# QApplication::restoreOverrideCursor();
return True
# bool Spreadsheet::writeFile(const QString &fileName)
def writeFile(self,fileName):
# QFile file(fileName);
# if (!file.open(QIODevice::WriteOnly)) {
# QMessageBox::warning(this, tr("Spreadsheet"),
# tr("Cannot write file %1:\n%2.")
# .arg(file.fileName())
# .arg(file.errorString()));
# return false;
# }
#
# QDataStream out(&file);
# out.setVersion(QDataStream::Qt_4_3);
#
# out << quint32(MagicNumber);
#
# QApplication::setOverrideCursor(Qt::WaitCursor);
# for (int row = 0; row < RowCount; ++row) {
# for (int column = 0; column < ColumnCount; ++column) {
# QString str = formula(row, column);
# if (!str.isEmpty())
# out << quint16(row) << quint16(column) << str;
# }
# }
# QApplication::restoreOverrideCursor();
return True
# void Spreadsheet::sort(const SpreadsheetCompare &compare)
def sort(self,compare):
# QList<QStringList> rows;
# QTableWidgetSelectionRange range = selectedRange();
# int i;
#
# for (i = 0; i < range.rowCount(); ++i) {
# QStringList row;
# for (int j = 0; j < range.columnCount(); ++j)
# row.append(formula(range.topRow() + i,
# range.leftColumn() + j));
# rows.append(row);
# }
#
# qStableSort(rows.begin(), rows.end(), compare);
#
# for (i = 0; i < range.rowCount(); ++i) {
# for (int j = 0; j < range.columnCount(); ++j)
# setFormula(range.topRow() + i, range.leftColumn() + j,
# rows[i][j]);
# }
#
# clearSelection();
# somethingChanged();
pass
@pyqtSlot()
def cut(self):
# copy();
# __del();
pass
@pyqtSlot()
def copy(self):
# QTableWidgetSelectionRange range = selectedRange();
# QString str;
#
# for (int i = 0; i < range.rowCount(); ++i) {
# if (i > 0)
# str += "\n";
# for (int j = 0; j < range.columnCount(); ++j) {
# if (j > 0)
# str += "\t";
# str += formula(range.topRow() + i, range.leftColumn() + j);
# }
# }
# QApplication::clipboard()->setText(str);
pass
@pyqtSlot()
def paste(self):
# QTableWidgetSelectionRange range = selectedRange();
# QString str = QApplication::clipboard()->text();
# QStringList rows = str.split('\n');
# int numRows = rows.count();
# int numColumns = rows.first().count('\t') + 1;
#
# if (range.rowCount() * range.columnCount() != 1
# && (range.rowCount() != numRows
# || range.columnCount() != numColumns)) {
# QMessageBox::information(this, tr("Spreadsheet"),
# tr("The information cannot be pasted because the copy "
# "and paste areas aren't the same size."));
# return;
# }
#
# for (int i = 0; i < numRows; ++i) {
# QStringList columns = rows[i].split('\t');
# for (int j = 0; j < numColumns; ++j) {
# int row = range.topRow() + i;
# int column = range.leftColumn() + j;
# if (row < RowCount && column < ColumnCount)
# setFormula(row, column, columns[j]);
# }
# }
# somethingChanged();
pass
def __del(self):
pass
# QList<QTableWidgetItem *> items = selectedItems();
# if (!items.isEmpty()) {
# foreach (QTableWidgetItem *item, items)
# delete item;
# somethingChanged();
# }
@pyqtSlot()
def selectCurrentRow(self):
pass
# selectRow(currentRow());
@pyqtSlot()
def selectCurrentColumn(self):
pass
# selectColumn(currentColumn());
# void Spreadsheet::recalculate()
@pyqtSlot()
def recalculate(self):
# for (int row = 0; row < RowCount; ++row) {
# for (int column = 0; column < ColumnCount; ++column) {
# if (cell(row, column))
# cell(row, column)->setDirty();
# }
# }
# viewport()->update();
pass
@pyqtSlot()
def setAutoRecalculate(recalc):
# autoRecalc = recalc;
# if (autoRecalc)
# recalculate();
pass
# void Spreadsheet::findNext(const QString &str, Qt::CaseSensitivity cs)
def findNext(self,str, cs):
# int row = currentRow();
# int column = currentColumn() + 1;
#
# while (row < RowCount) {
# while (column < ColumnCount) {
# if (text(row, column).contains(str, cs)) {
# clearSelection();
# setCurrentCell(row, column);
# activateWindow();
# return;
# }
# ++column;
# }
# column = 0;
# ++row;
# }
# QApplication::beep();
pass
# void Spreadsheet::findPrevious(const QString &str,
# Qt::CaseSensitivity cs)
def findPrevious(self, str, cs):
# int row = currentRow();
# int column = currentColumn() - 1;
#
# while (row >= 0) {
# while (column >= 0) {
# if (text(row, column).contains(str, cs)) {
# clearSelection();
# setCurrentCell(row, column);
# activateWindow();
# return;
# }
# --column;
# }
# column = ColumnCount - 1;
# --row;
# QApplication::beep();
pass
def somethingChanged(self):
# if (autoRecalc)
# recalculate();
# emit modified();
pass
# Cell *Spreadsheet::cell(int row, int column) const
def cell(self, row, column):
# return static_cast<Cell *>(item(row, column));
return self.item(row, column)
# void Spreadsheet::setFormula(int row, int column,
# const QString &formula)
def setFormula(self,row, column, formula):
# Cell *c = cell(row, column);
# if (!c) {
# c = new Cell;
# setItem(row, column, c);
# }
# c->setFormula(formula);
pass
def formula(self,row, column):
# Cell *c = cell(row, column);
# if (c) {
# return c->formula();
# } else {
# return "";
# }
pass
def text(self,row,column):
# Cell *c = cell(row, column);
# if (c) {
# return c->text();
# } else {
# return "";
# }
pass
# bool SpreadsheetCompare::operator()(const QStringList &row1,
# # const QStringList &row2) const
# bool SpreadsheetCompare::operator()(const QStringList &row1,
# const QStringList &row2) const
# {
# for (int i = 0; i < KeyCount; ++i) {
# int column = keys[i];
# if (column != -1) {
# if (row1[column] != row2[column]) {
# if (ascending[i]) {
# return row1[column] < row2[column];
# } else {
# return row1[column] > row2[column];
# }
# }
# }
# }
# return false;
# }
if __name__ == "__main__":
from PyQt4.QtGui import QApplication
import sys
app = QApplication(sys.argv)
sheet = Spreadsheet()
sheet.show()
app.exec_()
| [
"jiang_xmin@masssclouds.com"
] | jiang_xmin@masssclouds.com |
90a7306fe26d82895ac7c22cfef608dbd2ab86eb | f57e1ea2c21fbf54315a0b37305e59af29d02b4b | /apps/operations/migrations/0003_auto_20191125_1601.py | 864c687ac8bcb76e3a7d664de8d5df15f7e5fc81 | [
"MIT"
] | permissive | fxbyun/IT-MOOC | a74b2fb19ae32d38ab66400bb99c2596c1a5c631 | f361512e6ff81a6e120ee808b20d78a79e325f5c | refs/heads/master | 2022-09-18T15:29:53.782805 | 2020-05-09T16:52:22 | 2020-05-09T16:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Generated by Django 2.2 on 2019-11-25 16:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('operations', '0002_banner'),
]
operations = [
migrations.AlterModelOptions(
name='banner',
options={'verbose_name': '轮播图', 'verbose_name_plural': '轮播图'},
),
]
| [
"949172252@qq.com"
] | 949172252@qq.com |
80bd588467dd6fa205b2dd32d4c69424413b8982 | 19fca2cba4ea7fe105f4a5aa4e9d7e1878ed42fc | /bot/cogs/users.py | 4433cd14bdaf0704ba01ce5baed0afb1af727853 | [
"BSD-3-Clause"
] | permissive | mikeysan/TearDrops | 13e977a5cda106a7eb874957343311e792ce9916 | e98c35df7760d201e5ff887dd53641d1006a9644 | refs/heads/main | 2023-08-03T14:53:24.236517 | 2023-02-22T15:34:01 | 2023-02-22T15:34:01 | 309,755,770 | 0 | 0 | BSD-3-Clause | 2023-07-25T21:16:14 | 2020-11-03T17:12:23 | Python | UTF-8 | Python | false | false | 1,727 | py | from typing import Optional
from discord import Member, Embed, Interaction, app_commands
from discord.ext import commands
class Users(commands.Cog):
def __init__(self, client):
self.client = client
@app_commands.command(
name="user", description="Get information about a server member"
)
async def user(self, interaction: Interaction, user: Optional[Member] = None):
"""Gives info about a Server member"""
if not user:
user = interaction.user
# async with ctx.channel.typing():
embed = Embed(
title=f"{user.name}'s info",
description="Here's what I could find.",
color=user.color,
)
embed.add_field(name="Name", value=user.name, inline=True)
embed.add_field(name="ID", value=user.id, inline=True)
embed.add_field(name="Highest role", value=user.top_role, inline=True)
embed.add_field(name="Joined", value=user.joined_at)
embed.add_field(name="Account Created on", value=user.created_at)
embed.set_thumbnail(url=user.avatar_url)
await interaction.response.send_message(embed=embed)
@app_commands.command(name="avatar", description="")
async def avatar(self, interaction: Interaction, *, user: Member = None):
"""Fetches a user's avatar"""
if not user:
user = interaction.user
# async with ctx.channel.typing():
embed = Embed(color=user.color)
embed.set_footer(text=f"Displaying avatar of {user.display_name}")
embed.set_image(url=user.avatar_url)
await interaction.response.send_message(embed=embed)
async def setup(client):
await client.add_cog(Users(client))
| [
"vyom.j@protonmail.com"
] | vyom.j@protonmail.com |
0a27adddec7bd5a28a468dc596cbe844ddc978dc | 49d732bca602b4a9a7a5e430d837d5c327d51202 | /EDUCATION/Изменчивость.py | fdd8fc0b8611b8a7706e9097677b59c1396b9085 | [] | no_license | Xithriub/Future | 6169cff43f27a84259f7ca3ea097f813b7b4f630 | 40a9db1d554dc4b89b71b2fae82a0e7be07fcc46 | refs/heads/master | 2023-06-26T06:24:55.695316 | 2021-07-24T11:19:35 | 2021-07-24T11:19:35 | 389,040,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | def cube(x):
return x**2
x=int(input())
print(cube(x))
def cube3(q):
return q**3
q=int(input())
print(cube3(q))
def rectangle_area(x,y):
return x*y
def rectangle_perimetr(x,y):
return x+y
return (x,y)*2
x=int(input())
y=int(input())
print(rectangle_perimetr(x,y))
| [
"boponya.ru@gmail.com"
] | boponya.ru@gmail.com |
9c89013783cafd6e57461721b35391526117bceb | 9a06f17bc392af22937d11852c4a63059e9496e5 | /main.py | a81b82f27464bf7d7e58c74f6b3aa1427584d3ca | [] | no_license | hey-anthony/ChartApp | 7ebd4164558d87383cce18bf4e4559b0c12d171e | b0335558ce686dd49e8684d6e7ff4fe11752c511 | refs/heads/master | 2020-03-18T10:19:15.780744 | 2018-05-23T17:51:30 | 2018-05-23T17:51:30 | 134,606,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from math import *
from chardBoard import *
root = NetXY(1020, 620, 50)
root.buildNet()
| [
"tonymalakhov@gmail.com"
] | tonymalakhov@gmail.com |
f918ed90755ed571a8dec2604d11a81ee8e69e4e | 79934b1f225f9343803f459c3614ff9f4aa5824c | /generic-dynamodb-item/src/generic_dynamodb_item/models.py | 8e34faf1e256242e20657c042ff93109d1fb25ea | [
"MIT"
] | permissive | iann0036/cfn-types | 980980472264d9fa61ee24d9bd51433784f279f1 | e9694cb6123ed052789faa37bffeed619b3573a7 | refs/heads/master | 2022-07-31T01:28:56.977474 | 2022-01-11T00:23:27 | 2022-01-11T00:23:27 | 214,151,036 | 6 | 3 | MIT | 2022-07-15T21:07:58 | 2019-10-10T10:15:02 | Java | UTF-8 | Python | false | false | 10,717 | py | # DO NOT modify this file by hand, changes will be overwritten
import sys
from dataclasses import dataclass
from inspect import getmembers, isclass
from typing import (
AbstractSet,
Any,
Generic,
Mapping,
MutableMapping,
Optional,
Sequence,
Type,
TypeVar,
)
from cloudformation_cli_python_lib.interface import (
BaseModel,
BaseResourceHandlerRequest,
)
from cloudformation_cli_python_lib.recast import recast_object
from cloudformation_cli_python_lib.utils import deserialize_list
T = TypeVar("T")
def set_or_none(value: Optional[Sequence[T]]) -> Optional[AbstractSet[T]]:
if value:
return set(value)
return None
@dataclass
class ResourceHandlerRequest(BaseResourceHandlerRequest):
# pylint: disable=invalid-name
desiredResourceState: Optional["ResourceModel"]
previousResourceState: Optional["ResourceModel"]
typeConfiguration: Optional["TypeConfigurationModel"]
@dataclass
class ResourceModel(BaseModel):
    """Top-level model for a generic DynamoDB item resource (generated code)."""
    TableName: Optional[str]
    Attributes: Optional[Sequence["_Attribute"]]
    PartitionValue: Optional[str]
    SortValue: Optional[str]
    @classmethod
    def _deserialize(
        cls: Type["_ResourceModel"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_ResourceModel"]:
        """Build a ResourceModel from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        # recast_object coerces nested dicts back into the dataclasses defined
        # in this module before field extraction.
        dataclasses = {n: o for n, o in getmembers(sys.modules[__name__]) if isclass(o)}
        recast_object(cls, json_data, dataclasses)
        return cls(
            TableName=json_data.get("TableName"),
            Attributes=deserialize_list(json_data.get("Attributes"), Attribute),
            PartitionValue=json_data.get("PartitionValue"),
            SortValue=json_data.get("SortValue"),
        )
# work around possible type aliasing issues when variable has same name as a model
_ResourceModel = ResourceModel
@dataclass
class Attribute(BaseModel):
    """A named DynamoDB attribute: attribute name plus its typed value."""
    Name: Optional[str]
    Value: Optional["_AttributeValue"]
    @classmethod
    def _deserialize(
        cls: Type["_Attribute"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_Attribute"]:
        """Build an Attribute from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            Name=json_data.get("Name"),
            Value=AttributeValue._deserialize(json_data.get("Value")),
        )
# work around possible type aliasing issues when variable has same name as a model
_Attribute = Attribute
@dataclass
class AttributeValue(BaseModel):
    """A DynamoDB attribute value in wire format: one type-tagged field is set."""
    S: Optional[str]
    N: Optional[str]
    B: Optional[str]
    SS: Optional[Sequence[str]]
    NS: Optional[Sequence[str]]
    BS: Optional[Sequence[str]]
    M: Optional[Sequence["_Attribute2"]]
    L: Optional[Sequence[Sequence["_Attribute2"]]]
    NULL: Optional[bool]
    BOOL: Optional[bool]
    @classmethod
    def _deserialize(
        cls: Type["_AttributeValue"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_AttributeValue"]:
        """Build an AttributeValue from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        raw_list = json_data.get("L")
        return cls(
            S=json_data.get("S"),
            N=json_data.get("N"),
            B=json_data.get("B"),
            SS=json_data.get("SS"),
            NS=json_data.get("NS"),
            BS=json_data.get("BS"),
            M=deserialize_list(json_data.get("M"), Attribute2),
            # FIX: the code generator leaked an unresolved
            # "<ResolvedType(ContainerType.MODEL, Attribute2)>" placeholder
            # here, which is a syntax error.  "L" is declared as a list of
            # lists of Attribute2, so deserialize each inner list.
            L=[deserialize_list(inner, Attribute2) for inner in raw_list]
            if raw_list is not None
            else None,
            NULL=json_data.get("NULL"),
            BOOL=json_data.get("BOOL"),
        )
# work around possible type aliasing issues when variable has same name as a model
_AttributeValue = AttributeValue
@dataclass
class Attribute2(BaseModel):
    """Nesting level 2 of Attribute (generated to bound recursive map/list depth)."""
    Name: Optional[str]
    Value: Optional["_AttributeValue2"]
    @classmethod
    def _deserialize(
        cls: Type["_Attribute2"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_Attribute2"]:
        """Build an Attribute2 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            Name=json_data.get("Name"),
            Value=AttributeValue2._deserialize(json_data.get("Value")),
        )
# work around possible type aliasing issues when variable has same name as a model
_Attribute2 = Attribute2
@dataclass
class AttributeValue2(BaseModel):
    """Nesting level 2 of AttributeValue (generated to bound recursion depth)."""
    S: Optional[str]
    N: Optional[str]
    B: Optional[str]
    SS: Optional[Sequence[str]]
    NS: Optional[Sequence[str]]
    BS: Optional[Sequence[str]]
    M: Optional[Sequence["_Attribute3"]]
    L: Optional[Sequence[Sequence["_Attribute3"]]]
    NULL: Optional[bool]
    BOOL: Optional[bool]
    @classmethod
    def _deserialize(
        cls: Type["_AttributeValue2"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_AttributeValue2"]:
        """Build an AttributeValue2 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        raw_list = json_data.get("L")
        return cls(
            S=json_data.get("S"),
            N=json_data.get("N"),
            B=json_data.get("B"),
            SS=json_data.get("SS"),
            NS=json_data.get("NS"),
            BS=json_data.get("BS"),
            M=deserialize_list(json_data.get("M"), Attribute3),
            # FIX: replaces the generator's unresolved "<ResolvedType(...)>"
            # placeholder (a syntax error); "L" is a list of lists of Attribute3.
            L=[deserialize_list(inner, Attribute3) for inner in raw_list]
            if raw_list is not None
            else None,
            NULL=json_data.get("NULL"),
            BOOL=json_data.get("BOOL"),
        )
# work around possible type aliasing issues when variable has same name as a model
_AttributeValue2 = AttributeValue2
@dataclass
class Attribute3(BaseModel):
    """Nesting level 3 of Attribute (generated to bound recursive map/list depth)."""
    Name: Optional[str]
    Value: Optional["_AttributeValue3"]
    @classmethod
    def _deserialize(
        cls: Type["_Attribute3"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_Attribute3"]:
        """Build an Attribute3 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            Name=json_data.get("Name"),
            Value=AttributeValue3._deserialize(json_data.get("Value")),
        )
# work around possible type aliasing issues when variable has same name as a model
_Attribute3 = Attribute3
@dataclass
class AttributeValue3(BaseModel):
    """Nesting level 3 of AttributeValue (generated to bound recursion depth)."""
    S: Optional[str]
    N: Optional[str]
    B: Optional[str]
    SS: Optional[Sequence[str]]
    NS: Optional[Sequence[str]]
    BS: Optional[Sequence[str]]
    M: Optional[Sequence["_Attribute4"]]
    L: Optional[Sequence[Sequence["_Attribute4"]]]
    NULL: Optional[bool]
    BOOL: Optional[bool]
    @classmethod
    def _deserialize(
        cls: Type["_AttributeValue3"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_AttributeValue3"]:
        """Build an AttributeValue3 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        raw_list = json_data.get("L")
        return cls(
            S=json_data.get("S"),
            N=json_data.get("N"),
            B=json_data.get("B"),
            SS=json_data.get("SS"),
            NS=json_data.get("NS"),
            BS=json_data.get("BS"),
            M=deserialize_list(json_data.get("M"), Attribute4),
            # FIX: replaces the generator's unresolved "<ResolvedType(...)>"
            # placeholder (a syntax error); "L" is a list of lists of Attribute4.
            L=[deserialize_list(inner, Attribute4) for inner in raw_list]
            if raw_list is not None
            else None,
            NULL=json_data.get("NULL"),
            BOOL=json_data.get("BOOL"),
        )
# work around possible type aliasing issues when variable has same name as a model
_AttributeValue3 = AttributeValue3
@dataclass
class Attribute4(BaseModel):
    """Nesting level 4 of Attribute (generated to bound recursive map/list depth)."""
    Name: Optional[str]
    Value: Optional["_AttributeValue4"]
    @classmethod
    def _deserialize(
        cls: Type["_Attribute4"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_Attribute4"]:
        """Build an Attribute4 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            Name=json_data.get("Name"),
            Value=AttributeValue4._deserialize(json_data.get("Value")),
        )
# work around possible type aliasing issues when variable has same name as a model
_Attribute4 = Attribute4
@dataclass
class AttributeValue4(BaseModel):
    """Nesting level 4 of AttributeValue (generated to bound recursion depth)."""
    S: Optional[str]
    N: Optional[str]
    B: Optional[str]
    SS: Optional[Sequence[str]]
    NS: Optional[Sequence[str]]
    BS: Optional[Sequence[str]]
    M: Optional[Sequence["_Attribute5"]]
    L: Optional[Sequence[Sequence["_Attribute5"]]]
    NULL: Optional[bool]
    BOOL: Optional[bool]
    @classmethod
    def _deserialize(
        cls: Type["_AttributeValue4"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_AttributeValue4"]:
        """Build an AttributeValue4 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        raw_list = json_data.get("L")
        return cls(
            S=json_data.get("S"),
            N=json_data.get("N"),
            B=json_data.get("B"),
            SS=json_data.get("SS"),
            NS=json_data.get("NS"),
            BS=json_data.get("BS"),
            M=deserialize_list(json_data.get("M"), Attribute5),
            # FIX: replaces the generator's unresolved "<ResolvedType(...)>"
            # placeholder (a syntax error); "L" is a list of lists of Attribute5.
            L=[deserialize_list(inner, Attribute5) for inner in raw_list]
            if raw_list is not None
            else None,
            NULL=json_data.get("NULL"),
            BOOL=json_data.get("BOOL"),
        )
# work around possible type aliasing issues when variable has same name as a model
_AttributeValue4 = AttributeValue4
@dataclass
class Attribute5(BaseModel):
    """Deepest generated nesting level of Attribute."""
    Name: Optional[str]
    Value: Optional["_AttributeValue5"]
    @classmethod
    def _deserialize(
        cls: Type["_Attribute5"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_Attribute5"]:
        """Build an Attribute5 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            Name=json_data.get("Name"),
            Value=AttributeValue5._deserialize(json_data.get("Value")),
        )
# work around possible type aliasing issues when variable has same name as a model
_Attribute5 = Attribute5
@dataclass
class AttributeValue5(BaseModel):
    """Deepest generated AttributeValue level: scalar/set types only (no M or L,
    which terminates the recursive nesting)."""
    S: Optional[str]
    N: Optional[str]
    B: Optional[str]
    SS: Optional[Sequence[str]]
    NS: Optional[Sequence[str]]
    BS: Optional[Sequence[str]]
    NULL: Optional[bool]
    BOOL: Optional[bool]
    @classmethod
    def _deserialize(
        cls: Type["_AttributeValue5"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_AttributeValue5"]:
        """Build an AttributeValue5 from parsed JSON; returns None for empty input."""
        if not json_data:
            return None
        return cls(
            S=json_data.get("S"),
            N=json_data.get("N"),
            B=json_data.get("B"),
            SS=json_data.get("SS"),
            NS=json_data.get("NS"),
            BS=json_data.get("BS"),
            NULL=json_data.get("NULL"),
            BOOL=json_data.get("BOOL"),
        )
# work around possible type aliasing issues when variable has same name as a model
_AttributeValue5 = AttributeValue5
@dataclass
class TypeConfigurationModel(BaseModel):
    """Type configuration for this resource type (no configurable properties)."""
    @classmethod
    def _deserialize(
        cls: Type["_TypeConfigurationModel"],
        json_data: Optional[Mapping[str, Any]],
    ) -> Optional["_TypeConfigurationModel"]:
        """Build a TypeConfigurationModel; returns None for empty input."""
        if not json_data:
            return None
        return cls(
        )
# work around possible type aliasing issues when variable has same name as a model
_TypeConfigurationModel = TypeConfigurationModel
| [
"contact@ianmckay.com.au"
] | contact@ianmckay.com.au |
38f598a220b6dbe8edf5630980366db13ac2cc33 | 92da39ebfd681cb29b60e3d70c7382a5d1bad650 | /exercises/ch-5-ex-2/completed/authorization_server.py | cf87664c7168c12f4e5e1c5d1b637184fbd9a87a | [] | no_license | carbone84/oauth-in-action-code-py-clone | 8a4513deb8a16559209a40ba4ba47be7ad41a6b0 | b5e018ff65a20adf046f94b29e9e0660d29fd1fc | refs/heads/main | 2023-02-03T19:24:54.419091 | 2020-12-07T17:27:45 | 2020-12-07T17:27:45 | 306,489,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,432 | py | from flask import Flask
from flask import render_template, redirect, request
import secrets, base64
from tinydb import TinyDB, Query
app = Flask(__name__)
db = TinyDB('../database.json')
codes = {}
requests = {}
# authorization server information
auth_server = {
'authorization_endpoint': 'http://localhost:5001/authorize',
'token_endpoint': 'http://localhost:5001/token'
}
# client information
clients = [
{
'client_id': 'oauth-client-1',
'client_secret': 'oauth-client-secret-1',
'redirect_uris': ['http://localhost:5000/callback']
}
]
@app.route('/', methods=['GET'])
def index():
return render_template('index.html', clients=clients, auth_server=auth_server)
@app.route('/authorize', methods=['GET'])
def authorize():
client = getClient(request.args.get('client_id', ''))
if not client:
print(f"Unknown client {request.args.get('client_id', '')}")
return render_template('error.html', error="Unknown client")
elif request.args.get('redirect_uri', '') not in client['redirect_uris']:
print(f"Mismatched redirect URI, expected {client.redirect_uris} got {request.args.get('redirect_uri', '')}")
return render_template('error', error="Invalid redirect URI")
else:
request_id = secrets.token_urlsafe(8)
requests[request_id] = request.args
return render_template('approve.html', client=client, request_id=request_id)
@app.route('/approve', methods=['GET', 'POST'])
def approve():
request_id = request.form.get('request_id')
query = requests[request_id]
del requests[request_id]
if not query:
return render_template('error.html', error="No matching authorization request")
if request.form.get('approve'):
if query['response_type'] == 'code':
code = secrets.token_urlsafe(8)
codes[code] = { 'request': query}
# look into url.parse in js>py
callback_url = query['redirect_uri'] + f"?code={code}&state={query['state']}"
return redirect(callback_url)
else:
error = "unsupported_response_type"
callback_url = query['redirect_uri'] + f"?error={error}"
return redirect(callback_url)
else:
error = "access_denied"
callback_url = query['redirect_uri'] + f"?error={error}"
return redirect(callback_url)
@app.route('/token', methods=['POST'])
def token():
auth = request.headers.get('authorization')
client_id = None
if auth:
client_credentials_b64 = auth[8:len(auth)-1].encode()
client_credentials_bytes = base64.b64decode(client_credentials_b64)
client_credentials = client_credentials_bytes.decode('ascii').split(':')
client_id = client_credentials[0]
client_secret = client_credentials[1]
if request.form.get('client_id'):
if client_id:
print("Client attempted to authenticate with multiple methods")
return "invalid_client", 401
client_id = request.form.get('client_id')
client_secret = request.form.get('client_secret')
client = getClient(client_id)
if not client:
print(f"Unknown client {client_id}")
return "invalid_client", 401
if client['client_secret'] != client_secret:
print(f"Mismatched client secret, expected {client.client_secret} got {client_secret}")
return "invalid_client", 401
if request.form.get('grant_type') == 'authorization_code':
code = codes[request.form.get('code')]
if code:
del codes[request.form.get('code')]
if code['request']['client_id'] == client_id:
access_token = secrets.token_urlsafe(32)
refresh_token = secrets.token_urlsafe(32)
#insert to db
db.insert({
'access_token': access_token,
'client_id': client_id
})
db.insert({
'refresh_token': refresh_token,
'client_id': client_id
})
print(f"Issuing access token {access_token}")
#print(f"with scope {code['scope']}")
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'refresh_token': refresh_token
}
print(f"Issued tokens for code {request.form.get('code')}")
return token_response, 200
else:
print(f"Client mismatch, expected {code['authorization_endpoint_request']['client_id']} got {client_id}")
return "invalid_grant", 400
else:
print(f"Unknown code, {request.args.get('code')}")
return "invalid_grant", 400
elif request.form.get('grant_type') == 'refresh_token':
#call db to check for refresh token
sql = Query()
tokens = db.search(sql.refresh_token == request.form.get('refresh_token'))
if len(tokens) == 1:
token = tokens[0]
if token['client_id'] != client_id:
print(f"Invalid client using a refresh token, expected {token['client_id']} got {client_id}")
db.remove(sql.refresh_token == request.form.get('refresh_token'))
return 400
print(f"We found a matching refresh token: {request.form.get('refresh_token')}")
access_token = secrets.token_urlsafe(32)
token_response = {
'access_token': access_token,
'token_type': 'Bearer',
'refresh_token': token['refresh_token']
}
db.insert({
'access_token': access_token,
'client_id': token['client_id']
})
print(f"Issuing access token {access_token} for refresh token {request.form.get('refresh_token')}")
return token_response, 200
else:
print("No matching token was found.")
return 'invalid_grant', 400
else:
print(f"Unknown grant type, {request.args.get('grant_type')}")
return "unsupported_grant_type", 400
def getClient(client_id):
for client in clients:
if client['client_id'] == client_id:
return client
return "Client not found"
db.truncate()
| [
"bryanacarbone@gmail.com"
] | bryanacarbone@gmail.com |
febc0104f1bc4213d5b835978c1ad5620d91f157 | 8e84b0ebcc82e07380eb0eb240f94a689946899c | /util/logger.py | c6ecb77de5432b9aece304695a6cb5eaef9fbf92 | [
"MIT"
] | permissive | mconstant/graham_discord_bot | b4080c228a6d3fbd276813e9eccc85f64b1fb022 | f88129698a6ed0100c50bfa6878fb4f6e35524c0 | refs/heads/master | 2023-07-01T21:12:47.638206 | 2021-07-30T16:53:18 | 2021-07-30T16:53:18 | 391,128,493 | 0 | 0 | MIT | 2021-07-30T16:49:21 | 2021-07-30T16:28:46 | null | UTF-8 | Python | false | false | 545 | py | import logging
from logging.handlers import TimedRotatingFileHandler, WatchedFileHandler
def setup_logger(log_file: str, log_level: int = logging.INFO) -> logging.Logger:
    """Configure the root logger to write to *log_file* and return it.

    Attaches a WatchedFileHandler (cooperates with external rotation such as
    logrotate) and a TimedRotatingFileHandler (daily rotation, 100 backups),
    both using the ``asctime;levelname;message`` format.

    FIX: the original was annotated ``-> logging.Logger`` but returned None,
    and only the watched handler received the formatter.

    NOTE(review): both handlers target the same file, so every record is
    written twice and the two rotation strategies may conflict — confirm
    whether one handler should be dropped.
    """
    root = logging.getLogger()
    logging.basicConfig(level=log_level)
    formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S %z")

    watched_handler = WatchedFileHandler(log_file)
    watched_handler.setFormatter(formatter)
    root.addHandler(watched_handler)

    rotating_handler = TimedRotatingFileHandler(log_file, when="d", interval=1, backupCount=100)
    rotating_handler.setFormatter(formatter)
    root.addHandler(rotating_handler)
    return root
"bbedward@gmail.com"
] | bbedward@gmail.com |
08ee92e67c80136d88273b8f4146df7cb2fca289 | bffd24dfede4dec1b0ceabca6becf7936b54ceb8 | /appointment_reminders/wsgi.py | 51db1986f19e7dcf01ef053f09ea4eeda114d12e | [] | no_license | barbaraem/Appointment-app | f0ef688ca4bf720c100db520e186232939901bb7 | aa55d0d5627beae59a8cb78d3694e42c9ddeb986 | refs/heads/master | 2020-03-23T15:08:48.622038 | 2018-07-20T15:01:51 | 2018-07-20T15:01:51 | 141,725,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for appointment_reminders project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the WSGI app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "appointment_reminders.settings")
# Module-level WSGI callable discovered by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"barbaramalek82@gmail.com"
] | barbaramalek82@gmail.com |
7f144350d87384443247fefaa3f0ea005b1d6671 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/virtual_machine_memory_reservation_info.py | 3707eed990dc6821036df0d45d5babbbba5e6e7d | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
# Module-level logger (not used by the factory function below).
log = logging.getLogger(__name__)
def VirtualMachineMemoryReservationInfo(vim, *args, **kwargs):
    '''The VirtualMachineReservationInfo data object type describes the amount of
    memory that is being reserved for virtual machines on the host, and how
    additional memory may be acquired.

    Factory: creates the SOAP object via ``vim.client.factory`` and assigns the
    required/optional properties from positional and keyword arguments.

    :raises IndexError: if fewer than the 4 required properties are supplied.
    :raises InvalidArgumentError: on an unrecognised keyword argument.
    '''
    obj = vim.client.factory.create('{urn:vim25}VirtualMachineMemoryReservationInfo')

    # do some validation checking...
    # FIX: the message previously claimed "at least 5 arguments" and reported
    # only len(args), although the check requires 4 and also counts kwargs.
    supplied = len(args) + len(kwargs)
    if supplied < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % supplied)

    required = [ 'allocationPolicy', 'virtualMachineMax', 'virtualMachineMin',
        'virtualMachineReserved' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional args map onto required-then-optional property names, in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
"jmb@pexip.com"
] | jmb@pexip.com |
5933d1ca43ca35763321bc60fe768f2797352ea3 | e7b01a153a8a17904ef96eedc074421a8f39d325 | /galery/create_photo_miniatures.py | 9ee03b428e1d72a3e199fe7eafb5727915343fd2 | [] | no_license | OskarPlawszewski/LuizaLos | b861843af3873d42835e3e06c3fcf57e4f7b4b58 | 84f0fc8e3d30b9081e80e18853d286049db9193e | refs/heads/master | 2020-05-30T14:46:32.957589 | 2017-02-07T18:01:10 | 2017-02-07T18:01:10 | 70,085,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import os
from sys import platform
from PIL import Image
from galery.models import Photo, Photo_miniture
# Absolute directory where generated thumbnails are written.
# NOTE(review): only win32 and linux are handled; on any other platform
# (e.g. macOS) MEDIA_FILES_PATH stays undefined and fill_db() raises NameError.
if platform == "win32":
    MEDIA_FILES_PATH = r'C:\Users\oplawsze\media_files'
if platform == 'linux':
    MEDIA_FILES_PATH = r'/home/Oskar/media_files/'
# Maximum thumbnail dimensions (width, height) passed to PIL's thumbnail().
SIZE = 128, 128
def fill_db():
    """
    Rebuild all Photo_miniture rows from the current Photo set.

    Deletes every existing miniature, then for each Photo: creates a
    SIZE-bounded JPEG thumbnail on disk under MEDIA_FILES_PATH and records a
    Photo_miniture row pointing at it.

    NOTE(review): the stored image path is built from '/media/' + filename but
    then reduced by os.path.basename — confirm the intended media-relative path.
    """
    Photo_miniture.objects.all().delete()
    for photo in Photo.objects.all():
        img = Image.open(photo.image)
        # thumbnail() resizes in place, preserving aspect ratio within SIZE.
        img.thumbnail(SIZE, Image.ANTIALIAS)
        name_of_file = photo.title + 'mini.jpg'
        completeName = os.path.join(MEDIA_FILES_PATH, name_of_file)
        img.save(completeName, "JPEG")
        Photo_miniture.objects.get_or_create(
            big_photo=photo,
            title=photo.title,
            desctiption=photo.desctiption,
            image=os.path.basename('/media/' + name_of_file),
            timestamp=photo.timestamp
        )
"oskar.plawszewski@nokia.com"
] | oskar.plawszewski@nokia.com |
0a904e95317b9e68475c552b4d328f7872160d9e | f3f684dbc46746cc9537d827c71318eb64de6245 | /backend/backend/books/models.py | 31f3063824e995c0e73d095b2ffe0c1d5ccbbf4b | [] | no_license | WallGGom/Bookridge | 847e69ddc57b8dce9f0d754f8063ab58707118cf | ef7e005a5f0de930baeda2e0872f7e3a8b1527e3 | refs/heads/master | 2023-01-23T19:33:44.617582 | 2020-11-18T07:06:35 | 2020-11-18T07:06:35 | 310,851,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | from django.db import models
from django.conf import settings
# Create your models here.
class Kdc(models.Model):
    # Korean Decimal Classification entry: class number plus optional description.
    num = models.CharField(max_length=45)
    desc = models.TextField(null=True, blank=True)
class IsbnAdd1(models.Model):
    # First ISBN add-on digit — `target` presumably names the reader audience;
    # TODO confirm against the Korean ISBN addition-code table.
    num = models.CharField(max_length=45)
    target = models.CharField(max_length=45)
    desc = models.TextField(null=True, blank=True)
class IsbnAdd2(models.Model):
    # Second ISBN add-on digit — `shape` presumably names the publication
    # format; TODO confirm against the Korean ISBN addition-code table.
    num = models.CharField(max_length=45)
    shape = models.CharField(max_length=100)
    desc = models.TextField(null=True, blank=True)
class IsbnAdd3(models.Model):
    # Remaining ISBN add-on digits (subject classification) with description.
    num = models.CharField(max_length=45)
    desc = models.TextField(null=True, blank=True)
class Book(models.Model):
    # Bibliographic record keyed by ISBN, with both the raw classification
    # strings (*_original) and FKs to the decoded classification tables.
    title = models.CharField(max_length=500)
    author = models.CharField(max_length=500, null=True, blank=True)
    publisher = models.CharField(max_length=500, null=True, blank=True)
    vol = models.CharField(max_length=100, null=True, blank=True)
    pub_date = models.CharField(max_length=100, null=True, blank=True)
    isbn = models.CharField(max_length=100)
    price = models.CharField(max_length=100, null=True, blank=True)
    img_url = models.TextField(null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    # NOTE(review): default=None on non-nullable CharFields/FKs below will fail
    # at save time unless a value is always provided — confirm intent.
    isbn_add_original = models.CharField(max_length=10, default=None)
    kdc_original = models.CharField(max_length=10, default=None)
    isbn_add1 = models.ForeignKey(IsbnAdd1, on_delete=models.CASCADE, default=None)
    isbn_add2 = models.ForeignKey(IsbnAdd2, on_delete=models.CASCADE, default=None)
    isbn_add3 = models.ForeignKey(IsbnAdd3, on_delete=models.CASCADE, default=None)
    kdc = models.ForeignKey(Kdc, on_delete=models.CASCADE, null=True, blank=True)
class PopularBook(models.Model):
    # A book's popularity ranking for one demographic slice (gender/age)
    # over a date window, with its rental count and location.
    gender = models.IntegerField(default=0)
    age = models.CharField(max_length=45)
    ranking = models.IntegerField(default=0)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    rent_count = models.IntegerField(default=0)
    location = models.TextField(null=True, blank=True)
    book = models.ForeignKey(Book, on_delete=models.CASCADE, null=True, blank=True)
class BookRequest(models.Model):
    # A user's request for a book identified by ISBN (book may not exist yet).
    isbn = models.CharField(max_length=45)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
class Hashtag(models.Model):
    # A tag label attached to any number of books (many-to-many).
    hashtag = models.TextField()
    hashtag_books = models.ManyToManyField(Book, related_name='book_hashtags', blank=True)
| [
"pic6367@naver.com"
] | pic6367@naver.com |
27c9b2d9ce23380e8efa21824aee3bc52694acd3 | cd8eb02ef94cdb36ff20060049562c4e232d949c | /PythonSession-7/Code/PythonTuple.py | 8f7ef2a008f1f05f7dfcd104fe01bcfc28b9605f | [] | no_license | aaryajahagirdarGITHUB/Anirudh-Gaikwad-comp-Python-Tutorial | d669e55527f0bfd5381bfa0ae83444162b43b8a2 | 9ac43faa58d4135eea5bec270543abbfea06e7f7 | refs/heads/master | 2022-12-16T12:48:18.099117 | 2019-06-20T04:53:45 | 2019-06-20T04:53:45 | 294,915,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #Program to illustrate Python Tuple
my_tuple = (1, 2, 3,'W','X')
""""
A tuple is created by placing all the items (elements) inside parentheses
(),separated by commas.
"""
print(my_tuple)
print(my_tuple[0])
print(my_tuple[-1])
""""
We can use the index operator []
to access an item in a tuple where the index starts from 0.
"""
print(my_tuple[1:4])
print(my_tuple[:-1])
print(my_tuple[7:])
print(my_tuple[:])
print(my_tuple + (4, 5, 6))
#We can use + operator to combine two tuples. This is also called concatenation
print(my_tuple.count('W'))# Returns the number of items x
print(my_tuple.index('X'))# Returns the index of the item
| [
"noreply@github.com"
] | aaryajahagirdarGITHUB.noreply@github.com |
a55b4819fb0450ad1c9a283afaa245dec195c375 | 55366bb03c0444a60214f0bb0a489c3987487c8d | /checagem.py | f68a2d4cb75d50ed9d1336f53b61cbeceb3ec6e5 | [
"Apache-2.0"
] | permissive | joaofurini/GuerreiroDasEstradas | 4a90d51235265b2b17d6417ce0d49c3704bff17b | 5fce8c1181ca11a2c61c1a103c51625ee933f789 | refs/heads/master | 2020-05-16T00:13:57.479013 | 2019-04-26T21:45:19 | 2019-04-26T21:45:19 | 182,575,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | class Checagem:
def checkDecisao(self, resposta):
while resposta!=1 and resposta!= 2:
resposta = int(input("Digite apenas 1 ou 2 por favor:\n"))
return resposta
| [
"jp.furini@hotmail.com"
] | jp.furini@hotmail.com |
795b1e208905ceb4b1eace859b1de0ac3db9372b | d7d6f612badd6c96bbc8b90fff6663da47af0b03 | /functions/_inv_matmul.py | 357d733900e9f8ca54dae646161b23cfde69a02e | [] | no_license | hiteshsapkota/DRO-Deep-Kernel-Multiple-Instance-Learning | b8d69adba08b40722f19e151e06ffe57a3cee1f4 | 4e23236f50f75dc95ce0f7ab2358528bddcc5310 | refs/heads/main | 2023-01-02T07:24:38.105342 | 2020-10-26T00:07:21 | 2020-10-26T00:07:21 | 304,165,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | #!/usr/bin/env python3
import torch
from torch.autograd import Function
import settings
def _solve(lazy_tsr, rhs):
    """Solve lazy_tsr @ X = rhs, dispatching on tensor type and size.

    Cholesky-backed tensors solve directly; small systems (or when fast solves
    are disabled) go through an explicit Cholesky; otherwise a preconditioned
    iterative solve is used.
    """
    from ..lazy import CholLazyTensor, TriangularLazyTensor

    if isinstance(lazy_tsr, (CholLazyTensor, TriangularLazyTensor)):
        # May be triangular or triangular-derived; a direct solve is exact.
        return lazy_tsr.inv_matmul(rhs)
    if settings.fast_computations.solves.off() or lazy_tsr.size(-1) <= settings.max_cholesky_size.value():
        return lazy_tsr.cholesky()._cholesky_solve(rhs)
    else:
        # Preconditioner is built without tracking gradients.
        with torch.no_grad():
            preconditioner = lazy_tsr.detach()._inv_matmul_preconditioner()
        return lazy_tsr._solve(rhs, preconditioner)
class InvMatmul(Function):
    """Autograd Function computing A^{-1} @ rhs (optionally left @ A^{-1} @ rhs)
    for a lazily-represented matrix A, with custom backward via extra solves.
    """

    @staticmethod
    def forward(ctx, representation_tree, has_left, *args):
        # args is (left_tensor?, right_tensor, *matrix_args); matrix_args are
        # the tensors the representation_tree rebuilds the lazy tensor from.
        left_tensor = None
        right_tensor = None
        matrix_args = None

        ctx.representation_tree = representation_tree
        ctx.has_left = has_left

        if ctx.has_left:
            left_tensor, right_tensor, *matrix_args = args
        else:
            right_tensor, *matrix_args = args
        orig_right_tensor = right_tensor
        lazy_tsr = ctx.representation_tree(*matrix_args)

        # Promote a vector rhs to a 1-column matrix; remembered for squeezing.
        ctx.is_vector = False
        if right_tensor.ndimension() == 1:
            right_tensor = right_tensor.unsqueeze(-1)
            ctx.is_vector = True

        # Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)
        if ctx.has_left:
            # Batch the solves for left^T and rhs into one call, then split.
            rhs = torch.cat([left_tensor.transpose(-1, -2), right_tensor], -1)
            solves = _solve(lazy_tsr, rhs)
            res = solves[..., left_tensor.size(-2) :]
            res = left_tensor @ res
        else:
            solves = _solve(lazy_tsr, right_tensor)
            res = solves

        if ctx.is_vector:
            res = res.squeeze(-1)

        # Save solves (and inputs) so backward can reuse them instead of
        # re-solving; layout differs depending on has_left.
        if ctx.has_left:
            args = [solves, left_tensor, orig_right_tensor] + list(matrix_args)
        else:
            args = [solves, orig_right_tensor] + list(matrix_args)
        ctx.save_for_backward(*args)
        # Keeping the lazy tensor avoids rebuilding it in backward, at the
        # cost of extra memory.
        if settings.memory_efficient.off():
            ctx._lazy_tsr = lazy_tsr

        return res

    @staticmethod
    def backward(ctx, grad_output):
        # Extract items that were saved
        if ctx.has_left:
            solves, left_tensor, right_tensor, *matrix_args = ctx.saved_tensors
            left_solves = solves[..., : left_tensor.size(-2)]
            right_solves = solves[..., left_tensor.size(-2) :]
        else:
            right_solves, right_tensor, *matrix_args = ctx.saved_tensors

        # Get matrix functions
        if hasattr(ctx, "_lazy_tsr"):
            lazy_tsr = ctx._lazy_tsr
        else:
            lazy_tsr = ctx.representation_tree(*matrix_args)

        # Define gradient placeholders
        arg_grads = [None] * len(matrix_args)
        left_grad = None
        right_grad = None
        if any(ctx.needs_input_grad):
            # De-vectorize objects
            if ctx.is_vector:
                right_tensor = right_tensor.unsqueeze(-1)
                grad_output = grad_output.unsqueeze(-1)

            if not ctx.has_left:
                # Compute self^{-1} grad_output
                left_solves = InvMatmul.apply(ctx.representation_tree, False, grad_output, *matrix_args)

                if any(ctx.needs_input_grad[3:]):
                    # We call _quad_form_derivative to compute dl/dK
                    # To ensure that this term is symmetric, we concatenate the left and right solves together,
                    # and divide the result by 1/2
                    arg_grads = lazy_tsr._quad_form_derivative(
                        torch.cat([left_solves, right_solves], -1), torch.cat([right_solves, left_solves], -1).mul(-0.5)
                    )
                if ctx.needs_input_grad[2]:
                    right_grad = left_solves
                    if ctx.is_vector:
                        right_grad.squeeze_(-1)

                # Positions: (representation_tree, has_left, right_tensor, *matrix_args)
                return tuple([None, None] + [right_grad] + list(arg_grads))

            else:
                left_solves = left_solves @ grad_output

                if ctx.needs_input_grad[3]:
                    left_grad = grad_output @ right_solves.transpose(-1, -2)
                if any(ctx.needs_input_grad[4:]):
                    # We do this concatenation to ensure that the gradient of lazy_tsr is symmetric
                    arg_grads = lazy_tsr._quad_form_derivative(
                        torch.cat([left_solves, right_solves], -1), torch.cat([right_solves, left_solves], -1).mul(-0.5)
                    )
                if ctx.needs_input_grad[2]:
                    right_grad = left_solves
                    if ctx.is_vector:
                        right_grad.squeeze_(-1)

                # Positions: (representation_tree, has_left, left_tensor, right_tensor, *matrix_args)
                return tuple([None, None] + [left_grad, right_grad] + list(arg_grads))
"hiteshsapkota@gmail.com"
] | hiteshsapkota@gmail.com |
8f6a99be920e4f5df9c258b8bad46fa18c1ed27d | 288bbd086bf362bb42941a394717141fe94b59cb | /cf_037.py | 7fb68217a16714416d3f3137b0e77427511b19a3 | [] | no_license | basekim14/paulLab_codeFestival_py100 | 7108a797939f18c05ab3df140131b38ad08f347a | e8087711e65c1cf56ce131949004dbb6c45d124a | refs/heads/master | 2020-12-12T20:08:31.194895 | 2020-06-04T11:23:54 | 2020-06-04T11:23:54 | 234,218,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # Code Festival - Python Practice 037
# Author : ㄱㄱㅊ
# Title : Using count
# Date : 20-02-15
pointed = input().split()  # whitespace-separated votes (one name per vote)
count_num = 0
# Track the name with the highest vote count; ties keep the earliest winner.
# NOTE(review): pointed.count() inside the loop is O(n^2);
# collections.Counter(pointed).most_common(1) would be O(n).
# Empty input leaves pointed_name undefined (NameError at the print).
for name in pointed:
    if count_num < pointed.count(name):
        count_num = pointed.count(name)
        pointed_name = name
print('%s(이)가 총 %d표로 반장이 되었습니다.' % (pointed_name, count_num))
| [
"basekim14@gmail.com"
] | basekim14@gmail.com |
d2718946f75b25771bba9daa5179842efe804f79 | 516c2da43b6cfa712ecc0194874442f098f0bb25 | /icecreamratings_project/config/urls.py | a8379e7cd5e5254b755d7fac28927aea5375a5c6 | [] | no_license | nchwang/spider | e7a6aa94f9c483fdefd05109b483d6d07baf7317 | 275d9a7e66c3cb8d158ab12123a2d9cb157ac495 | refs/heads/master | 2021-01-01T16:42:00.999750 | 2017-07-21T01:53:48 | 2017-07-21T01:53:48 | 97,893,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Project URL configuration; MEDIA files are served by Django in addition.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('icecreamratings_project.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    # Mount django-debug-toolbar only when it is installed/enabled.
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
| [
"nchwang@163.com"
] | nchwang@163.com |
ab1a60dee1f4e783643dca5382273bca138c6a6f | 006fe4d8278da6365afffc6d0009636cef8336f2 | /intro chapters/stripping_names.py | d64fef6cc5fdceb9d7eed968798a9ade4840c21f | [] | no_license | osayi/python_intro | f3714d8ca391ae911fbb96e246c359b0d86f40f2 | 6be7694c49f8756faaf6eb009918726863cc01e5 | refs/heads/master | 2020-04-21T22:51:57.444484 | 2019-07-19T01:58:25 | 2019-07-19T01:58:25 | 169,927,150 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | name = ' Fela '
# Demonstrate whitespace stripping on the padded string `name`.
print(name)           # original, padded on both sides
print(name.rstrip())  # trailing whitespace removed
print(name.lstrip())  # leading whitespace removed
print(name.strip())   # whitespace removed from both sides
| [
"adamokuns@gmail.com"
] | adamokuns@gmail.com |
99dc217bc0e3c7d93969753502d481a115a55776 | 4bd4bacecee33cada173e427b5ecb1d758bafaad | /src/scalarizr/storage2/cloudfs/base.py | 78b39ceaae1d9ed9189164619f4692252f4984d3 | [] | no_license | kenorb-contrib/scalarizr | 3f2492b20910c42f6ab38749545fdbb79969473f | 3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83 | refs/heads/master | 2022-11-26T10:00:58.706301 | 2017-11-02T16:41:34 | 2017-11-02T16:41:34 | 108,550,233 | 0 | 2 | null | 2020-07-24T11:05:36 | 2017-10-27T13:33:46 | Python | UTF-8 | Python | false | false | 3,270 | py | import sys
import urlparse
import os
class DriverError(Exception):
    """Uniform error raised by cloud-FS drivers (see the ``raises`` decorator)."""
    pass
def raises(exc_class):
    """
    Catches all exceptions from the underlying function, raises *exc_class*
    instead, preserving the original message and traceback.

    .. code-block:: python

        @raises(MyError)
        def func():
            raise Exception(message)

        func() # raises MyError(message)
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except:
                # Python 2 three-argument raise: new class, old value, old tb.
                # NOTE(review): the bare ``except:`` also converts SystemExit
                # and KeyboardInterrupt into exc_class — confirm intended.
                exc = sys.exc_info()
                raise exc_class, exc[1], exc[2]
        return wrapper
    return decorator
def decorate_public_methods(decorator):
"""
An easy way to decorate all methods of a class and it's descendants with
the same decorator. The two following examples are equal:
.. code-block:: python
class Foo(object):
__metaclass__ = decorate_public_methods(decorator)
def foo(self):
pass
class Bar(Foo):
def bar(self):
pass
.. code-block:: python
class Foo(object):
@decorator
def foo(self):
pass
class Bar(Foo):
@decorator
def bar(self):
pass
"""
class DecoratePublicMethods(type):
def __init__(self, name, bases, dic):
super(DecoratePublicMethods, self).__init__(name, bases, dic)
for key, val in dic.iteritems():
if not key.startswith('_') and callable(val):
setattr(self, key, decorator(val))
return DecoratePublicMethods
class CloudFileSystem(object):
__metaclass__ = decorate_public_methods(raises(DriverError))
schema = None
features = {
'multipart': False
}
def _parse_url(self, url):
"""
:returns: bucket, key
"""
o = urlparse.urlparse(url)
assert o.scheme == self.schema, 'Wrong schema: %s' % o.scheme
return o.netloc, o.path[1:]
def _format_url(self, bucket, key):
return '%s://%s/%s' % (self.schema, bucket, key)
def exists(self, url):
parent = os.path.dirname(url.rstrip('/'))
# NOTE: s3 & gcs driver converts bucket names to lowercase while url
# arg in this method stays uncoverted -> url with uppercase bucket
# name will never be found
return url in self.ls(parent)
def ls(self, url):
raise NotImplementedError()
def stat(self, url):
'''
size in bytes
type = dir | file | container
'''
raise NotImplementedError()
def put(self, src, url, report_to=None):
raise NotImplementedError()
def get(self, url, dst, report_to=None):
raise NotImplementedError()
def delete(self, url):
raise NotImplementedError()
def multipart_init(self, path, part_size):
'''
:returns: upload_id
'''
raise NotImplementedError()
def multipart_put(self, upload_id, src):
raise NotImplementedError()
def multipart_complete(self, upload_id):
raise NotImplementedError()
def multipart_abort(self, upload_id):
raise NotImplementedError()
| [
"kenorb@users.noreply.github.com"
] | kenorb@users.noreply.github.com |
032b32cedc6c276e81afe3a15e3f9faa07720581 | 9a4b583317959ec020511095e5c132a169bb9257 | /features/steps/mc_steps_for_crf_entities_with_user_params_creation.py | dcf36e4e3a3cab947f85fde090e8ffc23e8cb879 | [] | no_license | Bochkarev90/epro-master | ed512a90a3489b3b6a4b05aa8117c3b605d14c52 | 2afb78ec0bc1f8e7fc94d14bc0af9dd2f3bd3369 | refs/heads/master | 2022-12-15T22:18:46.388556 | 2020-08-30T10:58:02 | 2020-08-30T10:58:02 | 291,240,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,656 | py | from behave import given
class Entities:
def __init__(self, field_label, value):
self._value = value
self._field_label = field_label
self._common_fields = None
self._pseudos_and_selectors = None
self._checkboxes = None
@property
def step(self):
if self._field_label in self._common_fields:
return f"\nAnd I put {self._value} in {self._field_label} field"
elif self._field_label in self._pseudos_and_selectors:
return f"\nAnd I choose {self._value} option in {self._field_label} field"
elif self._field_label in self._checkboxes:
if self._value.lower() != 'false':
return f"\nAnd I click on {self._field_label} checkbox"
else:
return ""
else:
exception = f"""No field {self._field_label} on adding popup. Fields that can be used: {
', '.join(self._common_fields + self._pseudos_and_selectors + self._checkboxes)}.
If you see this message, but you're sure that the field is on popup -
add it into Entities class in mc_entities_creation module"""
raise Exception(exception)
class Visit(Entities):
def __init__(self, field_label, value):
super().__init__(field_label, value)
self._common_fields = ['Visit Name', 'Visit Code', 'Order', 'Max Repeat Number']
self._pseudos_and_selectors = ['Visit Type', 'Epoch', 'Forms']
self._checkboxes = ['Is Repeated', 'Is Mandatory']
class Form(Entities):
def __init__(self, field_label, value):
super().__init__(field_label, value)
self._common_fields = ['Form Name', 'Form Code', 'Order', 'Description']
self._pseudos_and_selectors = ['Visit', 'Epoch', 'Form Type']
class Section(Entities):
def __init__(self, field_label, value):
super().__init__(field_label, value)
self._common_fields = ['Section Name', 'Section Code', 'Order', 'Description', 'Max Repeat Number']
self._pseudos_and_selectors = ['Section template', 'DataSet']
self._checkboxes = ['is Repeating', 'is Mandatory', 'In table format', 'Auto numbering']
class Item(Entities):
def __init__(self, field_label, value):
super().__init__(field_label, value)
self._common_fields = ['Title', 'Code', 'Code:', 'Order', 'Description', 'Length', 'Default', 'Columns Width']
self._pseudos_and_selectors = ['Field Type', 'Data Type', 'Control Type']
self._checkboxes = ['Is Critical', 'Is Mandatory', 'Is Lab Data', 'Is Data Transfer']
class Schedule(Entities):
def __init__(self, field_label, value):
super().__init__(field_label, value)
self._common_fields = []
self._pseudos_and_selectors = ['Schedule Type', 'Please, define a pattern for your schedule']
self._checkboxes = []
@given("I create visit with params")
def step_impl(context):
steps_to_execute = """
When I click on VISIT STRUCTURE button
And I click on ADD VISIT button
"""
for param in context.table:
steps_to_execute += Visit(field_label=param['param'], value=param['value']).step
steps_to_execute += "\nAnd I click on SAVE button"
context.execute_steps(steps_to_execute)
@given("I create form with params")
def step_impl(context):
steps_to_execute = """
When I click on CRF DESIGNING button
And I click on ADD FORM button
"""
for param in context.table:
steps_to_execute += Form(field_label=param['param'], value=param['value']).step
steps_to_execute += "\nAnd I click on SAVE button"
context.execute_steps(steps_to_execute)
@given("I create section with params in form with {form_code} code")
def step_impl(context, form_code):
steps_to_execute = f"""
When I click on CRF DESIGNING button
And I expand record with params in forms table
| column header | td value |
| Form Code | {form_code} |
And I click on ADD SECTION button
"""
for param in context.table:
steps_to_execute += Section(field_label=param['param'], value=param['value']).step
steps_to_execute += "\nAnd I click on SAVE button"
context.execute_steps(steps_to_execute)
@given("I create item with params in section with {section_code} code")
def step_impl(context, section_code):
steps_to_execute = f"""
When I expand record with params in sections table
| column header | td value |
| Section Code | {section_code} |
And I click on ADD NEW ITEM button
"""
for param in context.table:
steps_to_execute += Item(field_label=param['param'], value=param['value']).step
steps_to_execute += "\nAnd I click on SAVE button"
context.execute_steps(steps_to_execute)
@given("I create schedule with params for form with {form_code} form code")
def step_impl(context, form_code):
schedule_params = dict(context.table)
raise Exception("Not implemented")
# TODO
# steps_to_execute = f"""
# When I click on CRF DESIGNING button
# And I create schedule for record with params in forms table
# | column header | td value |
# | Form Code | {form_code} |
# And I click on ADD NEW ITEM button
# """
# for param in context.table:
# steps_to_execute += Item(field_label=param['param'], value=param['value']).step
# steps_to_execute += "\nAnd I click on SAVE button"
# context.execute_steps(steps_to_execute)
| [
"bochkarev.rabota@gmail.com"
] | bochkarev.rabota@gmail.com |
3d60faa0d259278366090f61f42a51ec3355ab24 | 0d23b4bc209a455e489352b8c5cf0b61d713d1d1 | /match/management/commands/reset-matches.py | 95f010d6dd635e63a1e19e0959da41b86116c261 | [
"MIT"
] | permissive | maxf/address-matcher | 4de5815c011e3b720b04b55cd87813cd4dcb0615 | cdc907e7417c1be36b077f06951db7e8875bb505 | refs/heads/master | 2020-12-25T15:17:41.947966 | 2016-11-08T16:29:12 | 2016-11-08T16:29:12 | 67,626,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
| [
"max@froumentin.net"
] | max@froumentin.net |
f04339670af036069b46af8492061f639973847f | 088314e3bd6ca7ef34d15f2aa45b743b363641d9 | /tasks/R2R/speaker/paths.py | 974d47598f0ee0d6c3af239f2de1668c10bf89b4 | [
"MIT"
] | permissive | weituo12321/PREVALENT_R2R | 7a27d580fcbe8f72a209697d053ca3eb2013e3a0 | 868fb53d6b7978bbb10439a59e65044c811ee5c2 | refs/heads/master | 2022-11-24T00:54:32.385940 | 2020-07-24T17:56:42 | 2020-07-24T17:56:42 | 248,832,547 | 8 | 7 | MIT | 2022-11-22T02:10:54 | 2020-03-20T19:07:08 | Python | UTF-8 | Python | false | false | 637 | py | convolutional_feature_store_paths = {
'imagenet': 'img_features/imagenet_convolutional',
'places365': 'img_features/places365_convolutional',
}
mean_pooled_feature_store_paths = {
'imagenet': 'img_features/ResNet-152-imagenet.tsv',
'places365': 'img_features/ResNet-152-places365.tsv',
}
bottom_up_feature_store_path = "img_features/bottom_up_10_100"
bottom_up_feature_cache_path = "img_features/bottom_up_10_100.pkl"
bottom_up_feature_cache_dir = "img_features/bottom_up_10_100_cache"
bottom_up_attribute_path = "data/visual_genome/attributes_vocab.txt"
bottom_up_object_path = "data/visual_genome/objects_vocab.txt"
| [
"weituo.hao@gmail.com"
] | weituo.hao@gmail.com |
5b32a619d72869652ed1676f8fc18bfca7241c85 | 5d9f2f091b77e0c31643b70bc256794867a18faa | /scripts/prepro_ngrams.py | 04b978f3606ea984daa9084fac1920a56f630aa1 | [] | no_license | wubaoyuan/adversarial-attack-to-caption | 9a6c26276371db46a4620d54537aa741f31c6d86 | c575ed7feeb7f9243e5707c0d2d34991cdafb9ab | refs/heads/master | 2020-05-31T23:49:09.802901 | 2019-11-23T14:55:31 | 2019-11-23T14:55:31 | 190,545,228 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,494 | py | """
Preprocess a raw json dataset into hdf5/json files for use in data_loader.lua
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/images is (N,3,256,256) uint8 array of raw image data in RGB format
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
import os
import json
import argparse
from six.moves import cPickle
from collections import defaultdict
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occuring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in xrange(1,n+1):
for i in xrange(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def create_crefs(refs):
crefs = []
for ref in refs:
# ref is a list of 5 captions
crefs.append(cook_refs(ref))
return crefs
def compute_doc_freq(crefs):
'''
Compute term frequency for reference data.
This will be used to compute idf (inverse document frequency later)
The term frequency is stored in the object
:return: None
'''
document_frequency = defaultdict(float)
for refs in crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):
document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return document_frequency
def build_dict(imgs, wtoi, params):
wtoi['<eos>'] = 0
count_imgs = 0
refs_words = []
refs_idxs = []
for img in imgs:
if (params['split'] == img['split']) or \
(params['split'] == 'train' and img['split'] == 'restval') or \
(params['split'] == 'all'):
#(params['split'] == 'val' and img['split'] == 'restval') or \
ref_words = []
ref_idxs = []
for sent in img['sentences']:
tmp_tokens = sent['tokens'] + ['<eos>']
tmp_tokens = [_ if _ in wtoi else 'UNK' for _ in tmp_tokens]
ref_words.append(' '.join(tmp_tokens))
ref_idxs.append(' '.join([str(wtoi[_]) for _ in tmp_tokens]))
refs_words.append(ref_words)
refs_idxs.append(ref_idxs)
count_imgs += 1
print('total imgs:', count_imgs)
ngram_words = compute_doc_freq(create_crefs(refs_words))
ngram_idxs = compute_doc_freq(create_crefs(refs_idxs))
return ngram_words, ngram_idxs, count_imgs
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
itow = json.load(open(params['dict_json'], 'r'))['ix_to_word']
wtoi = {w:i for i,w in itow.items()}
imgs = imgs['images']
ngram_words, ngram_idxs, ref_len = build_dict(imgs, wtoi, params)
cPickle.dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open(params['output_pkl']+'-words.p','w'), protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open(params['output_pkl']+'-idxs.p','w'), protocol=cPickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', default='data/dataset_coco.json', help='input json file to process into hdf5')
parser.add_argument('--dict_json', default='data/cocotalk.json', help='output json file')
parser.add_argument('--output_pkl', default='data/coco-all', help='output pickle file')
parser.add_argument('--split', default='all', help='test, val, train, all')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
main(params)
| [
"xuyan5533@gmail.com"
] | xuyan5533@gmail.com |
4dc985bb7c5f79c2d658275f3238ffb7666f68b7 | d4fd822c0cec03c5675e57e6f1cad8fb01de3fcf | /local_server_updated.py | b5ba8e42dcd794362ca70858b64ba909dc76ee19 | [] | no_license | ArtrixTech/HarmonyFuqiang | fd0fab4c226b5dcd634da1d5377ef5965e56c9d2 | 936921531c38f1dd6424355e75455dda2a2b634f | refs/heads/master | 2021-01-01T20:14:36.099625 | 2017-08-04T01:22:23 | 2017-08-04T01:22:23 | 98,798,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,798 | py | import socket
from threading import Thread
from io import BytesIO
import gzip
IS_DEBUG = False
def cut_string(input_str, head, tail):
if isinstance(
head,
str) and isinstance(
tail,
str) and isinstance(
input_str,
str):
try:
start = input_str.find(head) + len(head)
end = input_str.find(tail, start)
rt_str = ""
for index in range(start, end):
rt_str += input_str[index]
return rt_str
except ValueError as e:
print("Syntax does not match! Message: " + e)
raise ValueError("Syntax does not match! Message: " + e)
else:
raise TypeError("Inputs are not string!")
def unzip_gzip(input_data):
stream = BytesIO(input_data)
gzip_obj = gzip.GzipFile(fileobj=stream)
try:
return gzip_obj.read()
except OSError:
return input_data
def new_request_process(source_socket, source_address):
def get_port(data):
try:
cut_result = cut_string(data, target_host + ":", "/")
if cut_result.isdigit():
return int(cut_result)
except ValueError:
return 80
return 80
def decode(source):
try:
return source.decode("gb2312")
except UnicodeDecodeError:
try:
return source.decode("utf-8")
except UnicodeDecodeError:
return False
while True:
try:
received = source_socket.recv(1024 * 1024)
except ConnectionAbortedError:
break
except ConnectionResetError:
break
if received:
received = unzip_gzip(received)
decoded = decode(received)
if decoded and "CONNECT" not in decoded:
target_header = decoded.split("\r\n")
target_host = target_header[1].split(":")[1].strip()
target_port = get_port(decoded)
print("Target Host:" + target_host + ":" + str(target_port))
connect_succeed = False
target_binder = (target_host, target_port)
target_socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
try:
target_socket.connect(target_binder)
connect_succeed = True
except ConnectionRefusedError:
target_socket.close()
connect_succeed = False
except socket.gaierror:
target_socket.close()
connect_succeed = False
if connect_succeed:
target_socket.send(received)
target_socket.settimeout(0.5)
try:
response = target_socket.recv(1024)
while response:
try:
source_socket.send(response)
response = target_socket.recv(1024)
except ConnectionAbortedError:
break
except socket.timeout:
target_socket.close()
except ConnectionAbortedError:
target_socket.close()
proxy_server_address = ('127.0.0.1', 2850)
proxy_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy_server.bind(proxy_server_address)
proxy_server.listen(5)
while True:
# jam the listening thread
input_socket, address = proxy_server.accept()
n_thread = Thread(
target=new_request_process, args=(
input_socket, address,))
n_thread.start()
| [
"artrix@126.com"
] | artrix@126.com |
2e9eff8dee310ca6608ca04cd53b779bdaac0063 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02570/s396702537.py | 9fe0af1e414c1c161a8894caf8757f2e4915c3db | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | import sys; input = sys.stdin.readline
from math import ceil
d, t, s = map(int, input().split())
u, l = ceil(d/t), d//t
if u == l:
if u <= s: print("Yes")
else: print("No")
else:
if d/t <= s: print("Yes")
else:print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
490d28e40c9b5816f86bcab6d2e4c28452afb06a | 98fe63b14714cba6263d158a2b579ba7d7992383 | /recipes/create_zp_catalog.py | cc4a8ac7f4dbf8be823952ff6e99be02ba68f3c8 | [] | no_license | Jerry-Ma/WODP | 84a480754f060733364298719f41de32e350ae7c | 8eedbc4e4c98c29329439ce512e85e084dbde331 | refs/heads/master | 2020-12-25T15:08:41.742167 | 2016-08-11T13:51:38 | 2016-08-11T13:51:38 | 65,472,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Create Date : 2016-07-31 14:58
# Python Version : 2.7.12
# Git Repo : https://github.com/Jerry-Ma
# Email Address : jerry.ma.nk@gmail.com
"""
create_zp_catalog.py
"""
from astropy.table import Table, Column
from pyjerry.instrument.wiyn import WIYNFact
from common import open_with_meta
if __name__ == "__main__":
import sys
from astropy.io import fits
from astropy.time import Time
in_file, img_file, out_file = sys.argv[1:]
renamecol = [
('ra', 'SDSS_RA'), ('dec', 'SDSS_DEC'),
('u', 'SDSS_MAG_U'), ('err_u', 'SDSS_ERR_U'),
('g', 'SDSS_MAG_G'), ('err_g', 'SDSS_ERR_G'),
('r', 'SDSS_MAG_R'), ('err_r', 'SDSS_ERR_R'),
('i', 'SDSS_MAG_I'), ('err_i', 'SDSS_ERR_I'),
('z', 'SDSS_MAG_Z'), ('err_z', 'SDSS_ERR_Z'),
('ALPHA_J2000', 'ODI_RA'), ('DELTA_J2000', 'ODI_DEC'),
('MAG_AUTO', 'ODI_MAG_AUTO'), ('MAGERR_AUTO', 'ODI_ERR_AUTO'),
('XWIN_IMAGE', 'ODI_X'), ('YWIN_IMAGE', 'ODI_Y'),
]
hdulist, exts, layout = open_with_meta(img_file)
hdulist.close()
if layout == 'odi56':
get_ota_xy = WIYNFact.get_ota_xy
elif layout == 'podi':
get_ota_xy = WIYNFact.get_ota_xy_podi
else:
raise RuntimeError("layout {0} not recognized".format(layout))
tbl = Table.read(in_file, format='ascii.commented_header')
for oc, nc in renamecol:
tbl.rename_column(oc, nc)
# header column
hdulist = fits.open(img_file)
for key in ['AIRMASS', 'EXPMEAS']:
col = Column([hdulist[0].header[key], ] * len(tbl), name=key)
tbl.add_column(col)
# get mjd time
obstime = Time(hdulist[0].header['DATE-MID'], format='isot', scale='utc')
col_time = Column([obstime.mjd, ] * len(tbl), name='MJD')
tbl.add_column(col_time)
hdulist.close()
# odixy column
ota_xy = [get_ota_xy(ext) for ext in tbl['EXT_NUMBER']]
col_odi = Column(ota_xy, name='ODI_OTA')
tbl.add_column(col_odi)
tbl.write(out_file, format='ascii.commented_header')
| [
"jerry.ma.nk@gmail.com"
] | jerry.ma.nk@gmail.com |
d2643fba787193ae5a7ffbdf6a4b9891e581c6bf | 1bba04469cc15775e8e72b091a6a96b0a1655686 | /venv/Scripts/django-admin.py | 95f019d377722dfe0000bab3b86edb9fb6a52371 | [] | no_license | lvmenghui001/ssssbbs | e239e6540bdce9bef3be0a160150bee41802ae3e | 2007fdd989e498ef3748f69911fd1a26a81aaa12 | refs/heads/master | 2020-03-29T11:21:07.173334 | 2018-09-22T05:13:41 | 2018-09-22T05:13:41 | 149,847,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #!C:\Users\lmh\Desktop\PYTHON\django\ssssbbs\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"lmh@qq.com"
] | lmh@qq.com |
b77e4eac13884f67c28b97d4f2248645d6d007b8 | 99b30366e96963839ad61d49d1eeaa5782d8780f | /api/liberouterapi/modules/mqtt/__init__.py | 7fc23d6f00131eb9be573fa99416d238991b3ce1 | [] | no_license | petrstehlik/examon-web | bd114171aaa055565bd9c23151764f26c9aa538f | 75d42b93bf8dc8b429e969d6679448ce4ba6219f | refs/heads/master | 2021-01-20T21:23:29.949813 | 2017-08-30T10:39:30 | 2017-08-30T10:39:30 | 101,760,427 | 1 | 2 | null | 2017-08-29T13:02:48 | 2017-08-29T12:48:59 | Python | UTF-8 | Python | false | false | 2,957 | py | from liberouterapi import app, socketio, config
from liberouterapi.error import ApiException
from ..module import Module
from ..utils import split_list, merge_dicts
from Holder import Holder
from flask import Blueprint, request
from flask_socketio import send, emit, join_room, leave_room
import json
import logging
class MqttError(ApiException):
status_code = 500
mqtt = Blueprint('mqtt', __name__, url_prefix = '/mqtt')
log = logging.getLogger(__name__)
subscribed_metrics = dict()
def emit_data(node, metric, data):
global subscribed_metrics
if metric in subscribed_metrics and subscribed_metrics[metric] > 0:
log.debug("Metric: %s (subscribers: %s)", metric, subscribed_metrics[metric])
socketio.server.emit('data', {
'metric' : metric,
'node' : node,
'data' : data,
'range' : holder.minmax(metric)
},
namespace='/render',
room = metric)
# Initialize Holder with config topics
holder = Holder(config['mqtt']['server'],
mqtt_topics = json.loads(config['mqtt']['topics']))
holder.on_store = emit_data
@mqtt.route('/metric/<string:metric>')
def get_metric(metric):
"""
Return given metric data from a holder
"""
try:
return(json.dumps(holder.db[metric]))
except KeyError as e:
raise MqttError("Metric %s not found in holder's DB" % metric, status_code=404)
@mqtt.route('/nodes')
def get_nodes():
return(json.dumps(holder.nodes))
@socketio.on('subscribe-metric', namespace='/render')
def subscribe_metric(json):
global subscribed_metrics
if 'metric' in json:
metric = json['metric']
try:
if metric in subscribed_metrics:
subscribed_metrics[metric] += 1
else:
subscribed_metrics[metric] = 1
join_room(metric)
emit('initial-data', holder.db[metric], room = metric)
except KeyError as e:
emit('error', "Cannot find given metric '%s'" % metric)
else:
emit('error', "Missing metric in request")
@socketio.on('unsubscribe-metric', namespace='/render')
def unsubscribe_metric(json):
global subscribed_metrics
if 'metric' in json:
metric = json['metric']
try:
log.info("Unsubscribing from %s" % metric)
if metric in subscribed_metrics:
if subscribed_metrics[metric] > 0:
subscribed_metrics[metric] -= 1
else:
emit('error', "No subscriber in the room")
if subscribed_metrics[metric] == 0:
del subscribed_metrics[metric]
else:
emit('error', "Room doesn't exist")
leave_room(metric)
except KeyError as e:
emit('error', "Cannot find given metric '%s'" % metric)
else:
emit('error', "Missing metric in request")
| [
"xstehl14@stud.fit.vutbr.cz"
] | xstehl14@stud.fit.vutbr.cz |
34142fa0a3e841e6cc3dcf0af699be368dda9cde | c5ae28cd31ccd4b3530ff0b890fc00221cc1b223 | /Compression/interface.py | 07089a39297f44d77ce1ba1e24da68db2e3b4482 | [] | no_license | Drab-Hounet/algorithme | 232f1640b74795e1848e936c0de4315618e3970d | 9acec8ded0ea6b3317e1a68b83337b6424fd0a81 | refs/heads/master | 2020-12-30T13:59:33.034283 | 2017-05-19T09:54:44 | 2017-05-19T09:54:44 | 91,266,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,085 | py | import tkinter as tk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
import compression
import decompression
import toolbox
class Window:
def __init__(self):
self.toolbox = toolbox.ToolBox()
self.TypeOperation = "compression"
def popup(self, message):
messagebox.showinfo("Information", message)
def swapCheckbuttonCompress(self, stateCompressCheck, stateDecompressCheck, label):
if (stateCompressCheck and stateDecompressCheck):
self.TypeOperation = "compression"
label.set("fichier à compresser")
self.checkbuttonDecompress.toggle()
elif(stateCompressCheck and not stateDecompressCheck):
self.TypeOperation = "compression"
label.set("fichier à compresser")
def swapCheckbuttonDecompress(self, stateCompressCheck, stateDecompressCheck, label):
if (stateCompressCheck and stateDecompressCheck ):
self.TypeOperation = "decompression"
label.set("fichier à décompresser")
self.checkbuttonCompress.toggle()
elif(not stateCompressCheck and stateDecompressCheck):
self.TypeOperation = "decompression"
label.set("fichier à décompresser")
def interfaceCompression(self, path):
fname = askopenfilename(filetypes= [("All files", "*.txt")] )
path.set(fname)
def execute(self, path):
fname = path.get()
text=''
if (fname):
with open(fname) as fileToCompress:
for line in fileToCompress:
text = text + line
if(self.TypeOperation == "compression"):
task = compression.CompressionTxt(text)
else:
task = decompression.DecompressionTxt(text)
message = task.runProcess()['message']
self.popup(message)
def interface(self):
window = tk.Tk()
window.title('Compression - Decompression ')
canvaWindow = tk.Canvas(
window,
bg = '#80A0D8',
height = '300',
width = '500')
window.resizable(0,0)
label = tk.StringVar()
label.set("fichier à compresser")
labelCompress = tk.Label(
window,
textvariable = label,
bg = '#80A0D8')
canvaWindow.create_window(10, 100, window = labelCompress, anchor = "w")
path = tk.StringVar()
pathCompress = tk.Label(
window,
textvariable = path,
width = '30',
anchor = 'w')
canvaWindow.create_window(250, 100, window = pathCompress)
checkVarCompress = tk.IntVar()
checkVarDecompress = tk.IntVar()
self.checkbuttonCompress = tk.Checkbutton(
window,
text = "Compression",
bg = '#80A0D8',
onvalue = True,
offvalue = False,
variable = checkVarCompress,
command = lambda : self.swapCheckbuttonCompress(checkVarCompress.get(), checkVarDecompress.get(), label))
canvaWindow.create_window(200, 150, window = self.checkbuttonCompress, anchor = "w")
self.checkbuttonDecompress = tk.Checkbutton(
window,
text = "Decompression",
bg = '#80A0D8',
onvalue = True,
offvalue = False,
variable = checkVarDecompress,
command = lambda : self.swapCheckbuttonDecompress(checkVarCompress.get(), checkVarDecompress.get(), label))
canvaWindow.create_window(200, 175, window = self.checkbuttonDecompress, anchor = "w")
buttonChooseFileCompress = tk.Button(
window,
text = 'Choisir ...',
command = lambda : self.interfaceCompression(path))
canvaWindow.create_window(420, 100, window = buttonChooseFileCompress, width = "100")
buttonRunProcess = tk.Button(
window,
text = 'Run',
command = lambda : self.execute(path))
canvaWindow.create_window(420, 250, window = buttonRunProcess, width = "100")
canvaWindow.pack()
window.mainloop()
| [
"jerome.lombard@campus-numerique-in-the-alps.com"
] | jerome.lombard@campus-numerique-in-the-alps.com |
1e8e3c62a023c1063089af44a879ea93c51b8d60 | bde1da567dd3a4fc17e3200ff367312d21fa29fe | /agents/mixins.py | 7f140deff0f6311ffb2bdc67553e4fbd0c64d2fc | [
"MIT"
] | permissive | adrian-kalinin/django-crm | 87d859b55e29b4601f558f14a3edf7aff7e4b7f9 | 8fba525f281d2253f47698c4d2ed75c074f6a531 | refs/heads/main | 2023-06-24T01:30:03.144224 | 2021-07-20T15:42:29 | 2021-07-20T15:42:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from django.contrib.auth.mixins import AccessMixin
from django.shortcuts import redirect
class OrganiserAndLoginRequiredMixin(AccessMixin):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated or not request.user.is_organiser:
return redirect('leads:lead-list')
return super().dispatch(request, *args, **kwargs)
| [
"adrian.kalinin@protonmail.com"
] | adrian.kalinin@protonmail.com |
35caceecfe007534fe487f5b4ff7aa3dbc3ca320 | a62348929ea1911e7842beef868e5fa4d64d7927 | /api/server.py | 8184d335c168b6db069c51ebc95d73fb154b6084 | [
"MIT"
] | permissive | RENCI/pds-server-mock | 8bfa040421b4ffcfd0bfe7332ff39cecc1b84408 | 8d003561948b23aa4260c32d453a8f2c901e1dc7 | refs/heads/master | 2021-01-05T21:18:26.476560 | 2020-12-05T05:15:00 | 2020-12-05T05:15:00 | 241,140,488 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | import connexion
def create_app():
app = connexion.FlaskApp(__name__, specification_dir='openapi/')
app.add_api('my_api.yaml')
return app
| [
"xuh@cs.unc.edu"
] | xuh@cs.unc.edu |
addd273548263059e163970d6259a7de35e3a85d | 614ac13226e5cebcdd1b577128f661b1f3fe47a3 | /Tools/RegistrationSITK/reg_test.py | a8ddd2896269344a3087d7f02e5d63107c9bb88e | [
"MIT"
] | permissive | taznux/radiomics-tools | a35fe7a9fa297656bfea6980f0e215b5b3b7e0c8 | a089d146a50ff32412ae4147bd2f8388a23fa4d7 | refs/heads/master | 2022-06-13T05:54:10.651766 | 2022-06-08T22:14:24 | 2022-06-08T22:14:24 | 62,413,523 | 25 | 13 | MIT | 2020-06-30T03:39:32 | 2016-07-01T18:49:08 | C++ | UTF-8 | Python | false | false | 5,986 | py | __author__ = 'wchoi'
import SimpleITK as sitk
import numpy as np
# import os
# from ipywidgets import interact, fixed
import registration_callbacks as rc
# import registration_utilities as ru
inputImageList = [
["UMD0003", "UMD0003_20050121_PT", "UMD0003_20050120_CT"],
["UMD0012", "UMD0012_20050321_PT", "UMD0012_20050321_CT"],
["UMD0053", "UMD0053_20050203_PT", "UMD0053_20050202_CT"]
]
# %matplotlib qt
# This is the registration configuration which we use in all cases. The only parameter that we vary
# is the initial_transform.
def multires_registration(fixed_image, moving_image, initial_transform):
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.02)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=0.5, numberOfIterations=200,
estimateLearningRate=registration_method.EachIteration,
maximumStepSizeInPhysicalUnits=5.0)
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, rc.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_plot_values(registration_method))
final_transform = registration_method.Execute(fixed_image, moving_image)
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
return final_transform
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix + '.mha')
sitk.WriteTransform(transform, outputfile_prefix + '.tfm')
for inputImages in inputImageList:
crop_str = "-subvolume-scale_1"
print("Load Images")
fixed_image = sitk.ReadImage("D:/WFUBMC_nrrd/" + inputImages[0] + "/" + inputImages[1] + crop_str + ".nrrd",
sitk.sitkFloat32)
print(fixed_image)
moving_image = sitk.ReadImage("D:/WFUBMC_nrrd/" + inputImages[0] + "/" + inputImages[2] + crop_str + ".nrrd",
sitk.sitkFloat32)
print(moving_image)
initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
print(initial_transform)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.02)
registration_method.SetInterpolator(sitk.sitkLinear)
# The order of parameters for the Euler3DTransform is [angle_x, angle_y, angle_z, t_x, t_y, t_z]. The parameter
# sampling grid is centered on the initial_transform parameter values, that are all zero for the rotations. Given
# the number of steps and their length and optimizer scales we have:
# angle_x = -pi, 0, pi
# angle_y = 0
# angle_z = -pi, -pi/2, 0, pi/2, pi
registration_method.SetOptimizerAsExhaustive(numberOfSteps=[1, 0, 2, 0, 0, 0], stepLength=np.pi)
registration_method.SetOptimizerScales([1, 1, 0.5, 1, 1, 1])
registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, rc.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_plot_values(registration_method))
# Perform the registration in-place so that the initial_transform is modified.
registration_method.SetInitialTransform(initial_transform, inPlace=True)
registration_method.Execute(fixed_image, moving_image)
print(initial_transform)
final_transform = multires_registration(fixed_image, moving_image, initial_transform)
print(final_transform)
final_transform.WriteTransform("D:/WFUBMC_nrrd/" + inputImages[0] + "/LinearTransform_5.h5")
| [
"wchoi1022@gmail.com"
] | wchoi1022@gmail.com |
c370c472aafebd84a2b87575f6dcb0e37b94e932 | 6472c4553c49a8c05103355ff53b1cbb7f025e8f | /pava/implementation/natives/sun/nio/ch/WindowsAsynchronousSocketChannelImpl.py | a42dc13374784791b67769b761b7248140f118ab | [
"MIT"
] | permissive | laffra/pava | 0b012e27c207a3e0f3ca772667b0c32168fe3123 | 54d10cf7f8def2f96e254c0356623d08f221536f | refs/heads/master | 2021-01-23T04:23:22.887146 | 2020-12-21T23:14:09 | 2020-12-21T23:14:09 | 86,191,143 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | def add_native_methods(clazz):
def initIDs____(a0):
raise NotImplementedError()
def connect0__long__boolean__java_net_InetAddress__int__long__(a0, a1, a2, a3, a4, a5):
raise NotImplementedError()
def updateConnectContext__long__(a0, a1):
raise NotImplementedError()
def read0__long__int__long__long__(a0, a1, a2, a3, a4):
raise NotImplementedError()
def write0__long__int__long__long__(a0, a1, a2, a3, a4):
raise NotImplementedError()
def shutdown0__long__int__(a0, a1, a2):
raise NotImplementedError()
def closesocket0__long__(a0, a1):
raise NotImplementedError()
clazz.initIDs____ = staticmethod(initIDs____)
clazz.connect0__long__boolean__java_net_InetAddress__int__long__ = staticmethod(connect0__long__boolean__java_net_InetAddress__int__long__)
clazz.updateConnectContext__long__ = staticmethod(updateConnectContext__long__)
clazz.read0__long__int__long__long__ = staticmethod(read0__long__int__long__long__)
clazz.write0__long__int__long__long__ = staticmethod(write0__long__int__long__long__)
clazz.shutdown0__long__int__ = staticmethod(shutdown0__long__int__)
clazz.closesocket0__long__ = staticmethod(closesocket0__long__)
| [
"iV29VQzQVT11"
] | iV29VQzQVT11 |
643bc69de3a2f226c971508fa46fcf733afe5345 | 2a08b808ebcabf35b842f67ade9875814fedb892 | /python/ActDirs.py | a1e8e79a1f31e8bdd30421d84a1fca43f6c3bfe4 | [] | no_license | XelekGakure/MineSGen | b5fa243de42ac50c1c08499219150331d9299bf7 | 5cffa293c841fdfd574a2c1b6dd48c798ada9ba6 | refs/heads/master | 2020-03-19T03:23:15.928685 | 2018-06-03T19:15:48 | 2018-06-03T19:15:48 | 135,723,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import os
import logs
def generateIfNotExist(dir):
if not os.path.isdir(dir):
os.mkdir(dir)
logs.log(str(dir) + " created" + bcolors.ENDC)
def LogIfExist(dir):
if not os.path.isdir(dir):
logs.log(str(dir) + " not exist" + bcolors.ENDC)
else:
logs.log(str(dir) + " exist" + bcolors.ENDC)
| [
"julien.lauret@ynov.com"
] | julien.lauret@ynov.com |
939bdf3945b578df78cd4ee6287d02dc27df5c29 | a07a17bc06a3b6c8765f071af64ca51ff7fe4c80 | /setup.py | 278362ce757448dcb52d12eb103664795397ec9b | [
"MIT"
] | permissive | katomaso/django-market | df3856e67c480f51c1ca4bc9cb77c759a78fd5ca | 84c4fa10aefbd792a956cef3d727623ca78cb5fd | refs/heads/master | 2020-06-27T18:27:07.449443 | 2020-04-24T18:32:53 | 2020-04-24T18:32:53 | 200,018,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # coding:utf-8
import os
from setuptools import setup
def from_requirements(reqname):
"""Extract packages from requirements."""
with open(reqname, "rt") as reqs:
return [line.rstrip() for line in reqs]
packages = from_requirements("requirements.txt")
REDISCLOUD_KYES = (
'REDISCLOUD_URL',
'REDISCLOUD_PORT',
'REDISCLOUD_PASSWORD',
)
if all(map(lambda key: key in os.environ, REDISCLOUD_KYES)):
packages.append('django-redis-cache')
packages.append('hiredis')
setup(
name='django-market',
version='1.0',
description='Online market where multiple people can sell the same thing',
author='Tomas Peterka',
author_email='tomas@peterka.me',
url='',
install_requires=packages
)
| [
"prestizni@gmail.com"
] | prestizni@gmail.com |
acf7591025827c9574cbf93aaa316009d30b7f91 | 44a1364c54e68a48b2d05593b6453af558dbb05f | /Corner.py | 5a7044a2908ed4d9e607ed222057ca9469811ab0 | [] | no_license | stats19/stats19-dataset_tools | a64bef366383f3e1115e07c7d480370e07d990b5 | 9c61aaa353c064c048cef52ee2ab0f901585ecdb | refs/heads/master | 2022-10-24T03:07:34.217655 | 2020-06-16T09:54:14 | 2020-06-16T09:54:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | import re
def cornerInfotoMysql(dico, connmysql):
cur_mysql1 = connmysql.cursor()
cur_mysql2 = connmysql.cursor()
if not dico.get("elapsed_plus"):
dico["elapsed_plus"] = "NULL"
if dico.get("player1") and dico.get("team") and re.match("^\d+$", dico.get("player1")):
# print("SELECT teams_match_id FROM teams_matches WHERE match_id = %s AND team_id = %s;" % (
# dico.get("match_api_id"), dico.get("team")))
cur_mysql1.execute(
"SELECT teams_match_id FROM teams_matches WHERE match_id = %s AND team_id = %s;" % (
dico.get("match_api_id"), dico.get("team")
)
)
team_match_id = cur_mysql1.fetchone()
if team_match_id:
# print(
# "SELECT team_matches_player_id FROM teams_matches_players where teams_match_id = %s and player_id = %s;" % (
# team_match_id[0], dico.get("player1"))
# )
cur_mysql2.execute(
"SELECT team_matches_player_id FROM teams_matches_players where teams_match_id = %s and player_id = %s;" % (
team_match_id[0], dico.get("player1"))
)
t_matches_player_id = cur_mysql2.fetchone()
team_matches_player_id = ""
if t_matches_player_id:
dico["team_matches_player_id"] = t_matches_player_id[0]
# dico["team_matches_player_id"] = team_matches_player_id
# print("team_match_id : %s\nteam_matches_player_id : %s" % (team_match_id[0], team_matches_player_id))
else:
print("========\nplayer1 not found : %s\n=======" % (dico.get("player1")))
dico["substitute"] = dico.get("player1")
dico["substitute_team"] = dico.get("team")
cur_mysql1.close
cur_mysql2.close
return dico
| [
"pierre.sididris@live.fr"
] | pierre.sididris@live.fr |
c298645d8fbccbc1048f2356f90a670ad3994bf9 | 46e9a375f0562f5dfd9282da20cbb34900f6b230 | /Python/986.py | 083a36ffc45f5cd0093f7a1ef29041c027ef9106 | [] | no_license | MohisinShaik/LeetCode | da90a65a4980d9daa1bc11581f4d0aa415ddb2f4 | dd788a3bffc8c5121cbb83d8c2efe077bf8693db | refs/heads/master | 2022-04-23T11:16:42.855063 | 2020-04-16T04:58:39 | 2020-04-16T04:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | class Solution:
def intervalIntersection(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:
res = []
i = 0
j = 0
while i < len(A) and j < len(B):
a_overlap_b = A[i][0] >= B[j][0] and A[i][0] <= B[j][1]
b_overlap_a = B[j][0] >= A[i][0] and B[j][0] <= A[i][1]
if a_overlap_b or b_overlap_a:
start = max(A[i][0], B[j][0])
end = min(A[i][1], B[j][1])
res.append([start, end])
if A[i][1] < B[j][1]:
i += 1
else:
j += 1
return res
| [
"tvandcc@gmail.com"
] | tvandcc@gmail.com |
69d57719f82e9299fb4ecba4a5a2f957f0f18902 | 76f69f45139f05c7369a3e002ddd61ae5c77d584 | /shopping/urls.py | fa0c612a2775f8feccce31084ba7f11ee154c26b | [] | no_license | val-sytch/shop_django | 1ebe3e2a686bc4b7fbf8930725e59a2fc93905ce | da184244cd7061bf41f0857a9d7ec3866873c833 | refs/heads/master | 2021-01-12T13:29:02.508391 | 2016-10-18T20:43:15 | 2016-10-18T20:43:15 | 69,954,782 | 0 | 1 | null | 2016-10-22T16:08:44 | 2016-10-04T10:28:49 | CSS | UTF-8 | Python | false | false | 323 | py | from django.conf.urls import url, patterns
from shopping.views import add, remove
urlpatterns = patterns('shopping.views',
url(r'^add/(?P<id>[0-9]+)$', add, name='shopping-cart-add'),
url(r'^remove/(?P<id>[0-9]+)$', remove, name='shopping-cart-remove'),
url(r'^show/$', 'show', name='shopping-cart-show'),
)
| [
"optrv@users.noreply.github.com"
] | optrv@users.noreply.github.com |
0183f3f7f2baf910cf0facb6ce6d58af7ec6df00 | ebcb092d796366d36a1afe9c381cd9e4c31026f1 | /redis/python_redis_publisher.py | 6e0bd349be9638af02233a09bf270796fdcc99af | [
"MIT"
] | permissive | MiracleWong/PythonBasic | d2e0e56c88781ebf9c6870f185ceaba6ffaa21ca | cb8ec59dc646842b41966ea4ea4b1ee66a342eee | refs/heads/master | 2021-06-06T22:26:08.780210 | 2020-01-08T14:48:54 | 2020-01-08T14:48:54 | 96,536,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #!/usr/local/bin/python
# -*- coding:utf-8 -*-
# 订阅
from RedisHelper import RedisHelper
obj = RedisHelper()
obj.publish('nihao')#发布
| [
"cfwr1991@126.com"
] | cfwr1991@126.com |
fd7483a0b14698d89f735ecc1c5329191a309eae | ed26014727d50135c6d3f64f1bca358e8a990db2 | /first_app/views.py | 353229ae6cb670cef2e07d2d25402784abb1177f | [] | no_license | emreozbay/udemy-project | 7e940652db892f24cfb7d02ebe669b33123e9307 | 58f9122e19e1bda8f54d117b73711a10b02fbf4d | refs/heads/master | 2022-12-22T22:54:38.692977 | 2018-08-31T00:25:15 | 2018-08-31T00:25:15 | 146,821,054 | 0 | 1 | null | 2022-12-11T08:41:45 | 2018-08-31T00:28:19 | Python | UTF-8 | Python | false | false | 357 | py | from django.shortcuts import render
from django.http import HttpResponse
from first_app.models import Topic,Webpage,AccessRecord
def index(request):
webpages_list = AccessRecord.objects.order_by('date')
date_dict = {'access_records': webpages_list}
return render(request,'first_app/index.html', context=date_dict)
# Create your views here.
| [
"ozbay-emre@hotmail.com"
] | ozbay-emre@hotmail.com |
150c67577f0f6c5a4a2fe64d2d53eef3be28fd70 | fe43dfacb59372b54c68d8e7dd70f4c319602962 | /Backend/communities/serializers.py | d82d3bfaac8f4a5d6c45d911bef0a1192d73b748 | [] | no_license | BK-notburgerking/FilmFunFair | cfcdf420df35a6faf6171ae5b38ce2f17fac4831 | fdbc16401ca3a38c7d7a3467abcbbc693268bdc8 | refs/heads/master | 2023-06-04T00:49:28.994383 | 2021-06-14T01:57:01 | 2021-06-14T01:57:01 | 376,670,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | from rest_framework import serializers
from .models import Post, Comment
class CommentSerializer(serializers.ModelSerializer):
user_name = serializers.CharField(source='user.username', read_only=True)
class Meta:
model = Comment
fields = ('id', 'user_name', 'text')
read_only_fields = ('id', 'user_name')
class PostListSerializer(serializers.ModelSerializer):
user_name = serializers.CharField(source='user.username', read_only=True)
class Meta:
model = Post
fields = ('title', 'user_name', 'content', 'created_at', 'updated_at', 'id')
class PostSerializer(serializers.ModelSerializer):
user_name = serializers.CharField(source='user.username', read_only=True)
post_comment = CommentSerializer(many=True, read_only=True)
class Meta:
model = Post
fields = '__all__'
read_only_fields = ('user',)
| [
"oij1234567@gmail.com"
] | oij1234567@gmail.com |
fafb709de29460ea75e5e827e59780b2672d0ff8 | 054be2ae723f891245bbb8a83be0d24f19c43a39 | /lib/Warnings.py | 59fe966d7f086159d794f3cb4e438eed5deb8619 | [
"MIT"
] | permissive | Thomas84/pyRevitExtension | daea0cd5f5a36e8f46deddfae981c2335c9b484e | 442f0a94003abbd125b176d399fdbf9c0b4f61bc | refs/heads/master | 2022-04-07T21:18:58.956988 | 2020-03-11T20:35:48 | 2020-03-11T20:35:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import clr
from tools import CountFrequency
def Warnings(doc):
"""Get warnings from document"""
warnings = doc.GetWarnings()
warningMessages =[w.GetDescriptionText() for w in warnings]
countOfWarnings = CountFrequency(warningMessages)
allwarnings = [{"Description": key, "Count": value} for key, value in countOfWarnings.items()]
if len(allwarnings) < 1:
allwarnings.append({"Description": "", "Count": 0} )
return allwarnings | [
"pderendinger@gmail.com"
] | pderendinger@gmail.com |
62e37ebc3e6e0bfb19bb51f5101b05e188c158be | 3c9011b549dd06b6344c6235ed22b9dd483365d1 | /OrientacaoObjeto/aula18.py | 1ed8f6b0cc2da4d9f90b4791c271fa308f1c8a10 | [] | no_license | joaoo-vittor/estudo-python | 1411f4c3620bbc5f6b7c674a096cae8f90f0db8d | 5562d823dd574d7df49fddca87a1fbd319356969 | refs/heads/master | 2023-05-31T17:59:16.752835 | 2021-06-25T04:54:56 | 2021-06-25T04:54:56 | 292,372,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | from contextlib import contextmanager
"""
Aula 18
Context Manager - Criando e Usando gerenciadores de contexto
"""
"""
class Arquivo:
def __init__(self, arquivo, modo):
print('__init__')
self.arquivo = open(arquivo, modo)
def __enter__(self):
print('__enter__')
return self.arquivo
def __exit__(self, exc_type, exc_val, exc_tb):
print('__exit__')
self.arquivo.close()
with Arquivo('teste.txt', 'w') as f:
f.write('Hello World!')
"""
@contextmanager
def abrir(arquivo, modo):
try:
print('abrindo arquivo')
arquivo = open(arquivo, modo)
yield arquivo
finally:
print('fechando arquivo')
arquivo.close()
with abrir('teste.txt', 'w') as f:
f.write('Ola, mundo')
| [
"joaoo.vittor007@gmail.com"
] | joaoo.vittor007@gmail.com |
1728b847861e15b766914debf8e1b2d8dce830d1 | ee25dafcedca278c604dddb68f1ab28d8e1ecef0 | /source/Hybrid SLIM ItemCB_BF/Hybrid_SLIM_ItemCB_BF.py | 6bc6a02515164c18cca288e4eb0eb3da83321c26 | [] | no_license | nschejtman/recsys | 43ceaac9ba3a07ddd9860a9455aba1d64c446f23 | 99fb9098371487c69eb56de83f6f57c93e5aeb02 | refs/heads/master | 2021-01-18T21:39:38.841353 | 2017-01-31T17:24:09 | 2017-01-31T17:24:09 | 71,791,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,508 | py | import numpy as np
import scipy.sparse as sps
from collections import namedtuple
from sklearn.model_selection import KFold, ParameterGrid
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import ElasticNet, Ridge, Lasso
from sklearn.base import BaseEstimator
from sklearn.preprocessing import MaxAbsScaler
import time
import pandas as pd
import sys
sys.path.append('./../')
import utils.utils as ut
from TopPopular.TopPopular import TopPop
def cv_search(rec, urm, non_active_items_mask, sample_size, sample_from_urm=True):
np.random.seed(1)
urm_sample, icm_sample, _, non_active_items_mask_sample = ut.produce_sample(urm, icm=None, ucm=None,
non_active_items_mask=non_active_items_mask,
sample_size=sample_size, sample_from_urm=sample_from_urm)
params = {'l1_penalty': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10],
'l2_penalty': [0.001, 0.01, 0.1, 1, 10, 50, 100, 500, 1000],
'k_top': [100, 200, 500, 1000],
'count_top_pop':[True, False]}
params = {'l1_ratio':[0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.5, 1],
'k_top': [100, 200, 500, 1000],
'count_top_pop': [True, False]}
params = {'l1_ratio': [0.00000001,0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.5],
'k_top': [500, 1000, 2000, 5000, 10000],
'count_top_pop': [True, False]}
params = {'alpha_ridge':[9500, 9750, 10000, 25000, 50000, 75000, 100000]}
params = {'alpha_ridge':[100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, 1000000]}
params = {'alpha_lasso':[]}
grid = list(ParameterGrid(params))
folds = 4
kfold = KFold(n_splits=folds)
splits = [(train, test) for train,test in kfold.split(urm_sample)]
retained_ratings_perc = 0.75
n = 5
result = namedtuple('result', ['mean_score', 'std_dev', 'parameters'])
results = []
total = float(reduce(lambda acc, x: acc * len(x), params.itervalues(), 1) * folds)
prog = 1.0
for pars in grid:
print pars
rec = rec.set_params(**pars)
#rec.l1_ratio = rec.l1_penalty / (rec.l1_penalty + rec.l2_penalty)
#rec.top_pop.count = pars['count_top_pop']
maps = []
for row_train, row_test in splits:
urm_train = urm_sample[row_train,:]
rec.fit(urm_train)
urm_test = urm_sample[row_test,:]
hidden_ratings = []
for u in range(urm_test.shape[0]):
relevant_u = urm_test[u,].nonzero()[1] # Indices of rated items for test user u
if len(relevant_u) > 1:#1 or 2
np.random.shuffle(relevant_u)
urm_test[u, relevant_u[int(len(relevant_u) * retained_ratings_perc):]] = 0
hidden_ratings.append(relevant_u[int(len(relevant_u) * retained_ratings_perc):])
else:
hidden_ratings.append([])
maps.append(ut.map_scorer(rec, urm_test, hidden_ratings, n, non_active_items_mask_sample)) # Assume rec to predict indices of items, NOT ids
print "Progress: {:.2f}%".format((prog * 100) / total)
prog += 1
print maps
results.append(result(np.mean(maps), np.std(maps), pars))
print "Result: ", result(np.mean(maps), np.std(maps), pars)
scores = pd.DataFrame(data=[[_.mean_score, _.std_dev] + _.parameters.values() for _ in results],
columns=["MAP", "Std"] + _.parameters.keys())
print "Total scores: ", scores
scores.to_csv('SLIM_Item CV MAP values 3 (Ridge).csv', sep='\t', index=False)
'''cols, col_feat, x_feat = 3, 'l2_penalty', 'l1_penalty'
f = sns.FacetGrid(data=scores, col=col_feat, col_wrap=cols, sharex=False, sharey=False)
f.map(plt.plot, x_feat, 'MAP')
f.fig.suptitle("SLIM-Top pop CV MAP values")
i_max, y_max = scores['MAP'].argmax(), scores['MAP'].max()
i_feat_max = params[col_feat].index(scores[col_feat][i_max])
f_max = f.axes[i_feat_max]
f_max.plot(scores[x_feat][i_max], y_max, 'o', color='r')
plt.figtext(0, 0, "With 500 top pops\nMaximum at (sh={:.5f},k={:.5f}, {:.5f}+/-{:.5f})".format(
scores[col_feat][i_max],
scores[x_feat][i_max],
y_max,
scores['Std'][i_max]))
plt.tight_layout()
plt.subplots_adjust(top=0.9, bottom=0.15)
f.savefig('SLIM_Item CV MAP values 1.png', bbox_inches='tight')'''
class Hybrid_SLIM_ItemCB_BF(BaseEstimator):
def __init__(self, top_pops, l1_ratio=None, positive_only=True, alpha_ridge=None, alpha_lasso=None,
sh=2000, slim_weight=0.5,pred_batch_size=2500):
super(Hybrid_SLIM_ItemCB_BF, self).__init__()
self.positive_only = positive_only
self.l1_ratio = l1_ratio
self.alpha_ridge = alpha_ridge
self.alpha_lasso = alpha_lasso
self.top_pops = top_pops
self.sh = sh
self.slim_weight = slim_weight
self.pred_batch_size = pred_batch_size
def __str__(self):
return "SLIM (l1_penalty={},l2_penalty={},positive_only={})".format(
self.l1_penalty, self.l2_penalty, self.positive_only
)
def fit(self, URM, icm):
self.icm = icm
print time.time(), ": ", "Started fit"
self.dataset = URM
URM = ut.check_matrix(URM, 'csc', dtype=np.float32)
n_items = URM.shape[1]
# initialize the ElasticNet model
if self.alpha_ridge is not None:
self.model = Ridge(self.alpha_ridge, copy_X=False, fit_intercept=False)
elif self.alpha_lasso is not None:
self.model = Lasso(alpha=self.alpha_lasso, copy_X=False, fit_intercept=False)
else:
self.model = ElasticNet(alpha=1.0, l1_ratio=self.l1_ratio, positive=self.positive_only, fit_intercept=False, copy_X=False)
# we'll store the W matrix into a sparse csr_matrix
# let's initialize the vectors used by the sparse.csc_matrix constructor
values, rows, cols = [], [], []
# fit each item's factors sequentially (not in parallel)
for j in self.top_pops:#, because only the active ones are to be recommended(range(n_items) if self.k_top is None else top_pops):
# print time.time(), ": ", "Started fit > Iteration ", j, "/", n_items
# get the target column
y = URM[:, j].toarray()
# set the j-th column of X to zero
startptr = URM.indptr[j]
endptr = URM.indptr[j + 1]
bak = URM.data[startptr: endptr].copy()
URM.data[startptr: endptr] = 0.0
# fit one ElasticNet model per column
#print time.time(), ": ", "Started fit > Iteration ", j, "/", n_items, " > Fitting ElasticNet model"
if self.alpha_ridge is None and self.alpha_lasso is None:
self.model.fit(URM, y)
else:
self.model.fit(URM, y.ravel())
# self.model.coef_ contains the coefficient of the ElasticNet model
# let's keep only the non-zero values
nnz_mask = self.model.coef_ > 0.0
values.extend(self.model.coef_[nnz_mask])
rows.extend(np.arange(n_items)[nnz_mask])
cols.extend(np.ones(nnz_mask.sum()) * j)
# print nnz_mask.sum(), (self.model.coef_ > 1e-4).sum()
# finally, replace the original values of the j-th column
URM.data[startptr:endptr] = bak
# generate the sparse weight matrix
self.W_sparse = sps.csc_matrix((values, (rows, cols)), shape=(n_items, n_items), dtype=np.float32)
print time.time(), ": ", "Finished fit"
def predict(self, urm, n, non_active_items_mask):
print "Started prediction"
user_profile = urm
n_iterations = user_profile.shape[0] / self.pred_batch_size + (user_profile.shape[0] % self.pred_batch_size != 0)
ranking = None
for i in range(n_iterations):
print "Iteration: ", i + 1, "/", n_iterations
start = i * self.pred_batch_size
end = start + self.pred_batch_size if i < n_iterations - 1 else user_profile.shape[0]
batch_profiles = user_profile[start:end, :]
rated_items_batch = np.diff(batch_profiles.tocsc().indptr) != 0
# print "Similarity batch size: ", np.extract(rated_items_batch == True, rated_items_batch).shape[0]
# break
batch_sim_mat = ut.compute_similarity_matrix_mask(self.icm, self.sh, rated_items_batch)
mm_scaler = MaxAbsScaler(copy=False)
batch_sim_mat = mm_scaler.fit_transform(batch_sim_mat)
self.W_sparse = mm_scaler.fit_transform(self.W_sparse)
avg_sim_mat = self.slim_weight*self.W_sparse + (1-self.slim_weight)*batch_sim_mat
batch_scores = batch_profiles.dot(avg_sim_mat).toarray().astype(np.float32)
del avg_sim_mat
# remove the ones that are already rated
nonzero_indices = batch_profiles.nonzero()
batch_scores[nonzero_indices[0], nonzero_indices[1]] = 0.0
# remove the inactives items
batch_scores[:, non_active_items_mask] = 0.0
batch_ranking = batch_scores.argsort()[:, ::-1]
batch_ranking = batch_ranking[:, :n] # leave only the top n
sum_of_scores = batch_scores[np.arange(batch_scores.shape[0]), batch_ranking.T].T.sum(axis=1).ravel()
zero_scores_mask = sum_of_scores == 0
n_zero_scores = np.extract(zero_scores_mask, sum_of_scores).shape[0]
if n_zero_scores != 0:
batch_ranking[zero_scores_mask] = [self.top_pops[:n] for _ in range(n_zero_scores)]
if i == 0:
ranking = batch_ranking.copy()
else:
ranking = np.vstack((ranking, batch_ranking))
return ranking
urm = ut.read_interactions()
items_dataframe = ut.read_items()
icm = ut.generate_icm(items_dataframe)
item_ids = items_dataframe.id.values
actives = np.array(items_dataframe.active_during_test.values)
non_active_items_mask = actives == 0
test_users_idx = pd.read_csv('../../inputs/target_users_idx.csv')['user_idx'].values
urm_pred = urm[test_users_idx, :]
top_rec = TopPop(count=True)
top_rec.fit(urm)
top_pops = top_rec.top_pop[non_active_items_mask[top_rec.top_pop] == False]
recommender = Hybrid_SLIM_ItemCB_BF(top_pops=top_pops, alpha_ridge=100000, sh=500, slim_weight=0.7, pred_batch_size=200) #Also 50000 and 1000000, and Lasso
recommender.fit(urm, icm)
# cv_search(recommender, urm, non_active_items_mask, sample_size=10000, sample_from_urm=True)
ranking = recommender.predict(urm_pred, n=5, non_active_items_mask=non_active_items_mask)
ut.write_recommendations("Hybrid SLIM ItemCB AlphaR 100000 sh 500 weight 0.7", ranking, test_users_idx, item_ids)
recommender = Hybrid_SLIM_ItemCB_BF(top_pops=top_pops, alpha_ridge=100000, sh=5000, slim_weight=0.7, pred_batch_size=200) #Also 50000 and 1000000, and Lasso
recommender.fit(urm, icm)
# cv_search(recommender, urm, non_active_items_mask, sample_size=10000, sample_from_urm=True)
ranking = recommender.predict(urm_pred, n=5, non_active_items_mask=non_active_items_mask)
ut.write_recommendations("Hybrid SLIM ItemCB AlphaR 100000 sh 5000 weight 0.7", ranking, test_users_idx, item_ids)
recommender = Hybrid_SLIM_ItemCB_BF(top_pops=top_pops, alpha_ridge=100000, sh=500, slim_weight=0.9, pred_batch_size=200) #Also 50000 and 1000000, and Lasso
recommender.fit(urm, icm)
# cv_search(recommender, urm, non_active_items_mask, sample_size=10000, sample_from_urm=True)
ranking = recommender.predict(urm_pred, n=5, non_active_items_mask=non_active_items_mask)
ut.write_recommendations("Hybrid SLIM ItemCB AlphaR 100000 sh 500 weight 0.9", ranking, test_users_idx, item_ids)
recommender = Hybrid_SLIM_ItemCB_BF(top_pops=top_pops, alpha_ridge=100000, sh=5000, slim_weight=0.9, pred_batch_size=200) #Also 50000 and 1000000, and Lasso
recommender.fit(urm, icm)
# cv_search(recommender, urm, non_active_items_mask, sample_size=10000, sample_from_urm=True)
ranking = recommender.predict(urm_pred, n=5, non_active_items_mask=non_active_items_mask)
ut.write_recommendations("Hybrid SLIM ItemCB AlphaR 100000 sh 5000 weight 0.9", ranking, test_users_idx, item_ids) | [
"daniel.vacca@hotmail.com"
] | daniel.vacca@hotmail.com |
39adc6fe1a6e844d8ee1f0e7d1ee33d322917e47 | 653443f348293a6f8bc6b371077f508de81a960b | /libro/problemas_resueltos/Capitulo3/problema3_6.py | 013ecb98c0bb4eb03d919d067598fc37df1a0858 | [] | no_license | RicardoBernal72/CYPRicardoBS | b6527f5e476df310cecae4ef997a7be17cf02189 | 8295523ba1641cbfba013215406d190d0a2af1ba | refs/heads/master | 2020-07-23T18:46:51.968914 | 2019-12-10T19:54:42 | 2019-12-10T19:54:42 | 207,672,072 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | MAY=-100000
MEN=100000
N=int(input("num de enteros que se ingresan"))
I=1
for I in range(0,N,1):
NUM=int(input("num enetro"))
if NUM>MAY:
MAY=NUM
elif NUM<MEN:
MEN=NUM
else:
I=I+1
print(MAY)
print(MEN)
| [
"RicardoBernal72"
] | RicardoBernal72 |
037ab10687535c31dacd8a8deb0d53aff83da0d8 | b26fedf2a68e030768f012f18edfa8d8d7865271 | /PandasProjectLoan/code.py | 42bbbd895d4aa3fdac072b06832f3ff1e8ef32ad | [
"MIT"
] | permissive | hemangi44/greyatom-python-for-data-science | 6c4c6853a35602cbf516dcb5071eabd078cb0bdd | 2598b537e8bd720de78c02bae51ed770c6483b9e | refs/heads/master | 2021-05-21T00:08:45.534382 | 2020-05-16T10:06:23 | 2020-05-16T10:06:23 | 252,464,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# NOTE(review): `path` is not defined in this file; presumably it is injected
# by the hosting exercise platform -- confirm before running standalone.
bank=pd.read_csv(path)
# Split columns by dtype: object columns are categorical, the rest numeric.
categorical_var=bank.select_dtypes(include='object')
print(categorical_var)
numerical_var=bank.select_dtypes(include='number')
print(numerical_var)
# code starts here
# code ends here
# --------------
# code starts here
# Drop the identifier column and impute missing values with per-column modes.
banks=bank.drop(columns='Loan_ID')
print(banks.isnull().sum())
bank_mode=banks.mode().iloc[0]
banks.fillna(bank_mode,inplace=True)
print(banks.isnull().sum())
#code ends here
# --------------
# Code starts here
# Average loan amount broken down by gender, marital status and employment type.
avg_loan_amount=pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],values='LoanAmount',aggfunc='mean')
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
# code for loan aprroved for self employed
loan_approved_se = banks.loc[(banks["Self_Employed"]=="Yes") & (banks["Loan_Status"]=="Y"), ["Loan_Status"]].count()
print(loan_approved_se)
# code for loan approved for non self employed
loan_approved_nse = banks.loc[(banks["Self_Employed"]=="No") & (banks["Loan_Status"]=="Y"), ["Loan_Status"]].count()
print(loan_approved_nse)
# percentage of loan approved for self employed
# NOTE(review): 614 is presumably the total row count of the dataset --
# confirm; len(banks) would be the self-documenting form.
percentage_se = (loan_approved_se * 100 / 614)
percentage_se=percentage_se[0]
# print percentage of loan approved for self employed
print(percentage_se)
#percentage of loan for non self employed
percentage_nse = (loan_approved_nse * 100 / 614)
percentage_nse=percentage_nse[0]
#print percentage of loan for non self employed
print (percentage_nse)
# code ends here
# --------------
# code starts here
def loan(x):
    # Convert a loan term given in months to years.
    year=x/12
    return year
loan_term=banks['Loan_Amount_Term'].apply(lambda x:loan(x))
# Count applicants whose loan term is 25 years or longer.
big_loan_term=len(loan_term[loan_term>=25])
print(big_loan_term)
# code ends here
# --------------
# code starts here
# Mean applicant income and credit history per loan status.
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby=banks.groupby(['Loan_Status'])
loan_groupby=loan_groupby[columns_to_show]
mean_values=loan_groupby.agg([np.mean])
print(mean_values)
# code ends here
| [
"hemangi44@users.noreply.github.com"
] | hemangi44@users.noreply.github.com |
46db447fbc71c10235a518cb8734b21f4cf2b64a | 22a049ff0325ca12ae2efc83ff2a8e96e19b789e | /setup.py | 55e5f2d24a4151104d71d4f6f3eecbd980aee24b | [] | no_license | drcassar/hbd | dffa32e5dd0abc5e8968f5c46bbfa53eead03ca2 | b862d9963899bea20fd691d6652ff8c43b7ee399 | refs/heads/master | 2021-03-27T02:50:02.677471 | 2020-04-13T21:11:08 | 2020-04-13T21:11:08 | 247,778,731 | 0 | 0 | null | 2020-03-19T13:29:25 | 2020-03-16T17:35:00 | Python | UTF-8 | Python | false | false | 406 | py | import setuptools
# Packaging metadata for the `hbd` distribution (setuptools-based build).
setuptools.setup(
    name='hbd',
    version='1.6.9',
    author='Daniel Roberto Cassar',
    author_email='daniel.r.cassar@gmail.com',
    description='hbd',
    url="https://github.com/drcassar/hbd",
    packages=setuptools.find_packages(),
    # Runtime dependencies; version floors only where the code needs them.
    install_requires=['numpy>=1.1', 'pandas>=0.24.0', 'deap', 'tensorflow',
                      'mendeleev'],
    python_requires='>=3.6',
)
| [
"daniel.r.cassar@gmail.com"
] | daniel.r.cassar@gmail.com |
51dba365b7183071779df233dc52a0e85a8a5e11 | d330aba56416f4d8a413f7d75c133f7e10add587 | /src/python/mem_dump_er.py | 6cd7f5aec9cbb30cb360960784012f55bd4a62ce | [] | no_license | SadatAnwar/TI-Internship | 994a39182a697012b20557cbe38f6ec0f3ae92a9 | 24cf958fcf70cbd03932b5dfd3fbc3d08313928e | refs/heads/master | 2021-03-24T13:18:22.204866 | 2016-07-28T20:15:33 | 2016-07-28T20:15:33 | 41,534,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,838 | py | """
This file is intended to take care of all the programs related to parsing the ATP pattern and generating
assemblers for writing a mory dump from a text file
"""
from atp_from_template_writer import AtpFromTemplateWriter
from atp_to_assembler import AtpToAssembler
from global_variables import *
import jtag_driver
log = logging.getLogger(__name__)
# 4w ATP file signal sequence
TEST = 0
TCK = 1
TMS = 2
TDI = 3
TDO = 4
RST = 5
def toBin(i, l):
    """Return i as a binary string left-padded with zeros to width l.

    If the binary form is already wider than l it is returned unpadded,
    exactly as produced by bin().
    """
    return bin(i)[2:].zfill(l)
class MemoryControl(object):
    """Shared setup for ATP-template-driven memory access over JTAG.

    Resolves (from the arguments, or from the project config file when a
    path is not given) the template ATP file, the memory dump file and the
    output folders, and owns the writer objects used by the MemLoadEr /
    MemReadEr subclasses.

    NOTE: this module is Python 2 (see the `except Exception, e` syntax).
    """
    def __init__(self, memoryFile, templateATP):
        Config.read(CONFIG_FILE)
        if templateATP is None:
            # No template given: take every path from the FRAM_SETTING section.
            templateATP = Config.get("FRAM_SETTING", "atp_mem_wr_template")
            self.TEMPLATE_FOLDER = os.path.join(project_folder, Config.get("FRAM_SETTING", "atp_mem_template_folder"))
            self.TEMPLATE_ATP = os.path.join(self.TEMPLATE_FOLDER, templateATP)
            self.tempAtpFile = os.path.join(project_folder, Config.get("FRAM_SETTING", "mem_dump_atp"))
        else:
            # Explicit template: keep the temp ATP next to it.
            self.TEMPLATE_FOLDER = os.path.dirname(templateATP)
            self.TEMPLATE_ATP = templateATP
            self.tempAtpFile = os.path.join(self.TEMPLATE_FOLDER, tempF, 'temp.atp')
        if memoryFile is None:
            # Default dump file name/location from the config.
            memoryFile = 'dump.txt'
            self.MEM_FILE_FOLDER = os.path.join(project_folder, Config.get("FRAM_SETTING", "mem_dump_file_folder"))
            self.MEM_FILE = os.path.join(self.MEM_FILE_FOLDER, memoryFile)
            self.ASSEMBLER_FOLDER = os.path.join(project_folder,
                                                 Config.get("FRAM_SETTING", "mem_dump_assembler_folder"))
        else:
            self.MEM_FILE_FOLDER = os.path.dirname(memoryFile)
            self.MEM_FILE = memoryFile
            self.ASSEMBLER_FOLDER = os.path.join(self.MEM_FILE_FOLDER, tempF, asseblerF)
        self.dump = None  # raw dump-file lines (filled by readMemFromFile)
        self.BIN_FOLDER = None  # set by compileBinaries
        self.assemblyWriter = None  # set by the subclasses' writeTempATP
        self.BIN_FILES = None  # list of compiled binaries
        self.templateWriter = AtpFromTemplateWriter(self.TEMPLATE_ATP)
        return
    def writeAssemblers(self):
        """Convert the generated ATP into assembler files; return their list."""
        return self.assemblyWriter.convertToAssembler(self.ASSEMBLER_FOLDER)
    def writeTempATP(self):
        """Hook: subclasses generate their temporary ATP file here."""
        return
    def compileBinaries(self, folder):
        """Compile the assembler files into `folder` beneath the dump's folder."""
        self.BIN_FOLDER = os.path.join(self.MEM_FILE_FOLDER, self.MEM_FILE.replace('.', '_'), folder)
        try:
            self.BIN_FILES = self.assemblyWriter.compileAssemblerFiles(self.BIN_FOLDER)
            return self.BIN_FILES
        except Exception, e:  # Python 2 except syntax
            log.error(e)
            raise e
    def execute(self, reset):
        """Hook: subclasses run the compiled pattern over JTAG here."""
        return
class MemLoadEr(MemoryControl):
    """Writes a memory image (a '@addr' / hex-byte dump file) into the device.

    Turns each address/word pair from the dump into a write entry in the
    template ATP, converts the ATP to assembler, and plays it over JTAG.
    """
    def __init__(self, inputFile=None, templateATP=None):
        super(MemLoadEr, self).__init__(inputFile, templateATP)
    def readMemFromFile(self):
        """Read the raw dump file's lines into self.dump."""
        with open(self.MEM_FILE) as input:
            self.dump = input.readlines()
        return
    def writeTempATP(self):
        """Regenerate the temporary ATP from the template and the dump file.

        Overwrites any existing temp ATP, fills the template with the
        address/value pairs extracted from the dump, and prepares the
        assembler writer for the new ATP.
        """
        if os.path.isfile(self.tempAtpFile):
            os.remove(self.tempAtpFile)
            log.warn('File %s already exsists, will be overwritten')
        self.templateWriter.generateATP(self.makeAddressValuePairsFromDump(), self.tempAtpFile)
        log.info('Successfully completed, file %s created' % self.tempAtpFile)
        self.assemblyWriter = AtpToAssembler(self.tempAtpFile)
        return self.tempAtpFile
    def makeAddressValuePairsFromDump(self):
        """Parse the dump file into a list of {address_bits: word_bits} dicts.

        Lines starting with '@' carry a hex start address; other data lines
        hold pairs of hex byte tokens (the second token of each pair forms
        the high byte of the 16-bit word).  Addresses are emitted as 20-bit
        and words as 16-bit binary strings via toBin.
        """
        addressValueList = []
        with open(self.MEM_FILE, 'rU') as input:
            lines = input.readlines()
            for line in lines:
                line = line.replace('\n', '')
                if line.startswith('@'):
                    # This line contains the start address, so we extract it
                    s = line.replace('@', '')
                    address = int(s, 16)
                elif len(line) > 1 and 'q' not in line:
                    # Data line: must contain an even number of byte tokens.
                    data = line.split()
                    if len(data) % 2 != 0:
                        log.error('Memory dump file doesnot contain even data bits in one line')
                        log.error('error in line %s, contains only %s elements' % (line, len(data)))
                        raise Exception('MemoryDump')
                    for i in range(0, len(data), 2):
                        # Second token first: it is the word's high byte.
                        d = data[i + 1] + data[i]
                        addressValueList.append({toBin(address, 20): toBin(int(d, 16), 16)})
                        # log.debug('address: %s data: %s' % (hex(address).upper(), d))
                        address += 2
        input.close()
        return addressValueList
    def execute(self, reset):
        """Play the compiled binaries over JTAG and compare captured output."""
        driver = jtag_driver.JTAGDriver(self.BIN_FILES)
        driver.executeJTAGCommands(resetPRU=reset)
        return jtag_driver.compareResults(self.assemblyWriter.atpOutputSeq)
class MemReadEr(MemoryControl):
    """Reads memSize bytes starting at startAddr and writes them to a dump file."""
    def __init__(self, memoryFile, templateATP, startAddr, memSize):
        super(MemReadEr, self).__init__(memoryFile, templateATP)
        self.startAdd = startAddr  # first address to read
        self.memSize = memSize  # number of bytes to read (stepped by words)
        return
    def writeTempATP(self):
        """Generate the read ATP: one 20-bit address entry per 16-bit word."""
        if os.path.isfile(self.tempAtpFile):
            os.remove(self.tempAtpFile)
            log.warn('File %s already exsists, will be overwritten')
        addresses = []
        for i in range(0, self.memSize, 2):
            addresses.append(toBin(self.startAdd + i, 20))
        log.debug('address computed, memory will be read from a total of %s address location' % len(addresses))
        self.templateWriter.generateATP(addresses, self.tempAtpFile)
        self.assemblyWriter = AtpToAssembler(self.tempAtpFile)
        return self.tempAtpFile
    def execute(self, reset):
        """Run the JTAG read and write the captured bits out as a dump file.

        The captured bit string is split into 128-bit lines; every 16 bits
        form one word written as two hex bytes, with the second 8-bit group
        printed before the first -- the inverse of the parsing done in
        MemLoadEr.makeAddressValuePairsFromDump.
        """
        driver = jtag_driver.JTAGDriver(self.BIN_FILES)
        driver.executeJTAGCommands(resetPRU=reset)
        dataRead = jtag_driver.compareResults(self.assemblyWriter.atpOutputSeq)
        dataLines = []
        for i in range(0, len(dataRead), 8 * 16):
            dataLines.append(dataRead[i: i + (8 * 16)])
        log.debug('length of data recorded from device : %s' % len(dataRead))
        log.debug('data will be written to : %s' % self.MEM_FILE)
        with open(self.MEM_FILE, 'wb') as outFile:
            outFile.write('@%s\n' % hex(self.startAdd)[2:].upper())
            for x in range(0, len(dataLines)):
                dataLine = dataLines[x]
                for i in range(0, len(dataLine), 16):
                    data1 = dataLine[i: (i + 8)]
                    data2 = dataLine[i + 8: (i + 16)]
                    # Hex-format each byte, zero-padded to two digits.
                    data1 = hex(int(data1, 2))[2:].upper()
                    if len(data1) == 1:
                        data1 = '0' + data1
                    data2 = hex(int(data2, 2))[2:].upper()
                    if len(data2) == 1:
                        data2 = '0' + data2
                    outFile.write('%s %s ' % (data2, data1))
                outFile.write('\n')
            outFile.write('p\n')
        return dataRead
if __name__ == '__main__':
data = ['1', '1', '0', '0', '1', '1', '0', '1', '1', '0', '1', '0', '1', '0', '1', '1', '0', '0', '0', '1', '0', '0', '1', '0', '1', '1', '1', '0', '1', '1', '1', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '0', '1', '1', '0', '1', '0', '0', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '1', '1', '0', '0', '1', '1', '0', '1', '1', '0', '1', '0', '1', '0', '1', '1', '0', '0', '0', '1', '0', '0', '1', '0', '1', '1', '1', '0', '1', '1', '1', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '0', '1', '1', '0', '1', '0', '0', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '1', '1', '1', '0', '0', '1', '1', '0', '1', '1', '0', '1', '0', '1', '0', '1', '1', '0', '0', '0', '1', '0', '0', '1', '0', '1', '1', '1', '0', '1', '1', '1', '1', '0', '1', '0', '1', '0', '1', '0', '1', '0', '0', '1', '1', '0', '1', '0', '0']
    dataRead = ''.join(data)
    # Manual self-test: break the bit string into 128-bit lines, then print
    # each 16-bit word as two hex byte groups (second group first),
    # mirroring the formatting logic in MemReadEr.execute.
    dataLines = []
    for i in range(0, len(dataRead), 8 * 16):
        dataLines.append(dataRead[i: i + (8 * 16)])
    for y in range (0, len(dataLines)):
        data = dataLines[y]
        for x in range(0, len(data), 16):
            data1 = data[x: (x + 8)]
            data2 = data[x + 8: (x + 16)]
            data1 = hex(int(data1, 2))[2:].upper()
            data2 = hex(int(data2, 2))[2:].upper()
            print('%s %s ' % (data2, data1))
| [
"x0234668@ti.com"
] | x0234668@ti.com |
7f1c1019d356e08d34d78dfeead7a1f2cf586207 | 0ade1bd2ac2889972f91ad44a9fe8d45a71525f4 | /onmt/bin/release_model.py | 4a62c4e3bb93b287b5d15270a7fb934bb56b2711 | [
"MIT"
] | permissive | yimeng0701/OpenNMT-py | e0b880b8252779d929c7b04e87dcdba26f36f8ab | 6086cc352f71e92fdbfc0d5ea2e1c8b6aac5ba15 | refs/heads/master | 2020-12-28T12:02:54.490529 | 2020-02-17T09:53:35 | 2020-02-17T09:53:35 | 238,324,874 | 0 | 0 | MIT | 2020-02-04T23:02:50 | 2020-02-04T23:02:49 | null | UTF-8 | Python | false | false | 1,938 | py | #!/usr/bin/env python
import argparse
import torch
def get_ctranslate2_model_spec(opt):
    """Creates a CTranslate2 model specification from the model options.

    Returns None unless the options describe a vanilla Transformer:
    transformer encoder and decoder, positional encoding, equal layer
    counts, default scaled-dot attention and no relative positions.
    """
    if opt.encoder_type != "transformer" or opt.decoder_type != "transformer":
        return None
    if not opt.position_encoding:
        return None
    if opt.enc_layers != opt.dec_layers:
        return None
    if getattr(opt, "self_attn_type", "scaled-dot") != "scaled-dot":
        return None
    if getattr(opt, "max_relative_positions", 0) != 0:
        return None
    # Imported lazily: only needed once we know a spec will be produced.
    import ctranslate2
    head_count = getattr(opt, "heads", 8)
    return ctranslate2.specs.TransformerSpec(opt.layers, head_count)
def main():
    """CLI entry point: release an OpenNMT-py checkpoint for inference.

    --format pytorch     strip the optimizer state and re-save the checkpoint
    --format ctranslate2 convert the checkpoint to a CTranslate2 model
    """
    parser = argparse.ArgumentParser(
        description="Release an OpenNMT-py model for inference")
    parser.add_argument("--model", "-m",
                        help="The model path", required=True)
    parser.add_argument("--output", "-o",
                        help="The output path", required=True)
    parser.add_argument("--format",
                        choices=["pytorch", "ctranslate2"],
                        default="pytorch",
                        help="The format of the released model")
    opt = parser.parse_args()
    model = torch.load(opt.model)
    if opt.format == "pytorch":
        # Dropping the optimizer state shrinks the checkpoint for inference.
        model["optim"] = None
        torch.save(model, opt.output)
    elif opt.format == "ctranslate2":
        model_spec = get_ctranslate2_model_spec(model["opt"])
        if model_spec is None:
            raise ValueError("This model is not supported by CTranslate2. Go "
                             "to https://github.com/OpenNMT/CTranslate2 for "
                             "more information on supported models.")
        # Imported lazily so the pytorch path works without ctranslate2.
        import ctranslate2
        converter = ctranslate2.converters.OpenNMTPyConverter(opt.model)
        converter.convert(opt.output, model_spec, force=True)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"vince62s@yahoo.com"
] | vince62s@yahoo.com |
2a6aea3581258e5e52dc9f9fa5e9486ce3b163ae | 046333321b2717c6391a111fc2f74b04bbbeb7af | /chapter8(sets)/loop.py | 87de075c3243648b5ab0ff49eeb9890dfdcdff71 | [] | no_license | jyash28/Python-practice | b0c9df42bc93716d8721a1420ee1f3170b40b18c | cd3a61934618145cbaa20e62194ebb1642ba9941 | refs/heads/main | 2023-07-03T18:06:38.407491 | 2021-07-13T09:47:07 | 2021-07-13T09:47:07 | 314,485,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | s= {"a", "b", "c"}
for item in s:
print(item) | [
"jyash548@gmail.com"
] | jyash548@gmail.com |
0cc20e3d8887ba1c74d03f39a588040dcb82a964 | 9c00eb1b20439663099eed3888750e9030e6170d | /h.py | ee810c0509e0cb21ac917fd6603dc2fd0c86f46d | [] | no_license | ebenz99/GCal2Meet | 50359b56812ad28bcc9330cd1a527212385149fc | 383cb144b1e267ed82a7040e76bfa9c33d967b32 | refs/heads/master | 2023-05-25T06:53:10.910494 | 2020-02-24T04:35:31 | 2020-02-24T04:35:31 | 205,033,990 | 11 | 3 | null | 2023-05-22T22:17:03 | 2019-08-28T22:27:21 | Python | UTF-8 | Python | false | false | 496 | py | import uuid
import hashlib
def hash_password(password):
    """Hash `password` with a fresh random salt; returns 'digest:salt'."""
    # uuid4().hex yields a 32-character random hex string to use as the salt.
    salt = uuid.uuid4().hex
    digest = hashlib.sha256(salt.encode() + password.encode()).hexdigest()
    return ':'.join([digest, salt])
def check_password(hashed_password, user_password):
    """Return True if `user_password` matches the stored 'digest:salt' record.

    The candidate is hashed with the stored salt and compared to the stored
    digest.  SECURITY FIX: the comparison uses hmac.compare_digest instead
    of `==` so it runs in constant time (no timing side channel).
    """
    import hmac  # local import keeps the module's top-level imports unchanged
    password, salt = hashed_password.split(':')
    candidate = hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
    return hmac.compare_digest(password, candidate)
# Smoke test: hash a sample password and show the stored record.
new_pass = 'hi'
hashed_password = hash_password(new_pass)
print(hashed_password)
"ebenz99@gmail.com"
] | ebenz99@gmail.com |
1d10071761bd11aeeb296cfab00a8988d545d81e | c612087393e0ad5d8f963acd3abeeb5293a73274 | /pbs/pbs_plot_atc2.py | a0b82a78e6470882782f3b730134b72c00867a56 | [] | no_license | pirarbaaa/mgo962-lab | 7ed85ae32fd8f4add24439d85e309c9bde3091ee | f41c6494ee4b2e77e5ba0073c3ea2555103858f2 | refs/heads/main | 2023-09-02T04:09:59.081721 | 2021-11-02T13:25:45 | 2021-11-02T13:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | import pandas as pd
import matplotlib.pyplot as plt
# load the data
# First column becomes a parsed DatetimeIndex; first row supplies headers.
df = pd.read_csv("PBS.csv", header=0, parse_dates=True, index_col=0)
# Keep only rows whose ATC2 code is "A10" (per the query string).
dfs = df.query('ATC2 == "A10"')
dft = dfs.groupby('Month')
# Total cost per month.
dft = dft['Cost'].agg(sum)
dft.plot()
plt.grid()
plt.show()
| [
"stefano.norcia@gmail.com"
] | stefano.norcia@gmail.com |
b987455cd75fa47b77b58128fac2018759a369c3 | 35d25d5f84b6f7670b979590a760ca1565547446 | /CNN_Classifier/svm.py | 162b8ae08e9a196d35faf7a51c23058531cb9541 | [] | no_license | macma/5001CarClassification | 9e702c93cc2e1ed10dbd4c492c3ad2e4f9a9e511 | 269ed9fa43f17f53b7e2d4137d0b15be0b3c3734 | refs/heads/master | 2021-01-23T08:09:44.773745 | 2017-03-28T15:37:26 | 2017-03-28T15:37:26 | 86,477,379 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import svm_feature_loader as sfl
from sklearn.decomposition import PCA
import numpy as np
data = sfl.svm_feature_loader()
features = data['featureList']
labels = data['labelList']
pca = PCA()
pca.fit(features)
print pca.explained_variance_ratio_
'''
top 3 eigenvector explains > 90% of the total variance, PCA is validated.
''' | [
"Mac Ma"
] | Mac Ma |
be547f4005a4c35f8b375f345ecff5192c27b2c1 | e88425e336cc344e198f5c5689620c6e89c4d4f4 | /migrations/versions/cce2ff1416a6_.py | 91b09da1f5e86ef246aeea830db3f944a9767d85 | [] | no_license | mu29/sms-for-sy | b3dd7bb35ee45f87725b9bfd4be3d0269e0ce70e | 6dfb4ac2f903e43d9eb452fad22fc66cb866c6b6 | refs/heads/master | 2021-01-10T06:50:19.981819 | 2016-03-24T07:05:02 | 2016-03-24T07:05:02 | 53,867,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | """empty message
Revision ID: cce2ff1416a6
Revises: None
Create Date: 2016-03-14 23:39:41.360111
"""
# revision identifiers, used by Alembic.
revision = 'cce2ff1416a6'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `teachers` table (auto-generated by Alembic)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('teachers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('subject', sa.String(), nullable=True),
    sa.Column('age', sa.String(), nullable=True),
    sa.Column('school', sa.String(), nullable=True),
    sa.Column('phone', sa.String(), nullable=True),
    sa.Column('contact', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the `teachers` table, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('teachers')
    ### end Alembic commands ###
| [
"mu29gl@gmail.com"
] | mu29gl@gmail.com |
82ad3c5cf3f58858f3deef7c1e8edf13bfbcfb1e | 08287b54ff9d19630845c765833e39278f042e58 | /clusterizare.py | bfbd82cfd9240040177724f7827a7f680c286820 | [] | no_license | ioanaandreeab/python_ps | 2769b2468eec055f23335e967a1db02a07dab724 | 59a8a280ff34b16297eae973f360f9b465bdaab1 | refs/heads/master | 2021-04-17T03:18:25.777058 | 2020-03-29T14:47:48 | 2020-03-29T14:47:48 | 249,407,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
# Load the two employee CSVs.  NOTE(review): rows are assumed to line up by
# position across the two files -- confirm both share the same ordering.
angajati = pd.read_csv('angajati_salarii_majorate.csv')
angajati_date = pd.read_csv('angajati_date.csv')
# Feature matrix: one (seniority, salary) pair per employee.
X = np.column_stack([angajati_date['Vechime'], angajati['Salariu']])
# Partition the employees into three clusters.
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
print(kmeans.cluster_centers_)
print(kmeans.labels_)
# Figure 1: raw scatter; figure 2: points coloured by cluster;
# figure 3: the cluster centres alone.
f1 = plt.figure()
plt.scatter(X[:, 0], X[:, 1], label='True Position')
f2 = plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_, cmap='rainbow')
f3 = plt.figure()
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], color='black')
plt.show()
| [
"45459027+deodre@users.noreply.github.com"
] | 45459027+deodre@users.noreply.github.com |
6a3e030a10cbebfab70aae588fb57d56987317ee | 5899930c0eb199a7b824205f36b9ff09cda01998 | /backend/migrations/0004_auto_20210104_0340.py | 62cecfb7c5bf9f94d62d1b3f12fa4e5581344afa | [] | no_license | HernanGC/WeatherApp | 04ee540da22b50957f07c2b3cd8f13283f084920 | cbde32c1b8c0b6c6a3befe99cb1cd4c207e27037 | refs/heads/master | 2023-03-08T10:13:45.626997 | 2021-02-11T09:57:28 | 2021-02-11T09:57:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Generated by Django 3.1.4 on 2021-01-04 03:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0003_headers'),
]
operations = [
migrations.AlterField(
model_name='city',
name='country',
field=models.CharField(blank=True, max_length=40),
),
migrations.AlterField(
model_name='city',
name='region',
field=models.CharField(blank=True, max_length=40),
),
]
| [
"hgonzalez@tiendamia.com"
] | hgonzalez@tiendamia.com |
2ab4c2d868b56f6058aee3dca275a024fa542197 | 0799cc61dc667732f3856cee99b3ce682a4910f8 | /temp_django_poject/wsgi.py | 88cc712a5d20eadc93f06bfba5479a75164c9e6c | [] | no_license | bushuevzi/temp_django_poject | cb087cd1d896f61252b6e4fc07f2e3358ab4d11f | c35fbf88af183475b304ab78bc68cb29239e6d1f | refs/heads/master | 2021-01-22T21:17:30.702686 | 2017-03-18T15:19:12 | 2017-03-18T15:19:12 | 85,410,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | """
WSGI config for temp_django_poject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default the settings module; a value already set in the environment wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "temp_django_poject.settings")
# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| [
"bushuevzi@mail.ru"
] | bushuevzi@mail.ru |
9b266ab9a5dcd13918bc543041dcb89d53261a5a | 5b3cf3b04a75e7b0592a69161c89edafc2a9d72b | /chessboard/main.py | 4760cd35d8191e85f69d5c5b2b0e4b8c1c5e9123 | [] | no_license | pongshy/AlgorithmDesignAndAnalysis | 31a29f472b04e75a56e47677402e8a40229bd85a | 623529ac559ada1f28fee562c5af2d80531d7bff | refs/heads/master | 2023-01-15T15:36:32.724585 | 2020-11-07T12:21:47 | 2020-11-07T12:21:47 | 302,087,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,860 | py | # 棋盘覆盖 #
import tkinter as tk
import copy
import threading
from tkinter import ALL
# Next tile number handed out by the solver (tile 0 marks the special square).
title = 1
# board = [[0 for i in range(8)] for j in range(8)]
# NOTE: `global` at module scope is a no-op; `board` is created in drawAll().
global board
global tempBoard
tempBoard = list()  # intended to hold board snapshots (currently unused)
colors = ['red', 'green', 'pink', 'blue', 'gray', 'orange', 'purple']
index = 0  # animation progress counter for the step-by-step display
# Core algorithm: divide-and-conquer tromino tiling
def chessBoard(tr, tc, dr, dc, size, depth=-1):
    """Tile a size x size board (minus one special square) with L-trominoes.

    Divide-and-conquer: split the board into four quadrants.  The quadrant
    containing the special square (dr, dc) recurses unchanged; in each of
    the other three, the corner cell touching the centre is claimed by the
    current tromino and then treated as that quadrant's special square.

    tr, tc -- top-left row/column of the current sub-board
    dr, dc -- row/column of the special (uncovered) square
    size   -- side length of the current sub-board (a power of two)
    depth  -- recursion depth, used for colouring; the initial call omits it

    Side effects only: writes [tile_number, depth] entries into the
    module-level `board` and numbers tiles via the module-level counter
    `title`.  (A large block of dead, commented-out duplicate code was
    removed from this function.)
    """
    global title
    if size == 1:
        return
    s = int(size / 2)
    t = int(title)  # number of the tromino placed at this split level
    title += 1
    d = depth + 1
    # Special square in the top-left quadrant?
    if dr < tr + s and dc < tc + s:
        chessBoard(tr, tc, dr, dc, s, d)
    else:
        board[tr + s - 1][tc + s - 1] = [t, d]
        chessBoard(tr, tc, tr + s - 1, tc + s - 1, s, d)
    # Special square in the top-right quadrant?
    if dr < tr + s and dc >= tc + s:
        chessBoard(tr, tc + s, dr, dc, s, d)
    else:
        board[tr + s - 1][tc + s] = [t, d]
        chessBoard(tr, tc + s, tr + s - 1, tc + s, s, d)
    # Special square in the bottom-left quadrant?
    if dr >= tr + s and dc < tc + s:
        chessBoard(tr + s, tc, dr, dc, s, d)
    else:
        board[tr + s][tc + s - 1] = [t, d]
        chessBoard(tr + s, tc, tr + s, tc + s - 1, s, d)
    # Special square in the bottom-right quadrant?
    if dr >= tr + s and dc >= tc + s:
        chessBoard(tr + s, tc + s, dr, dc, s, d)
    else:
        board[tr + s][tc + s] = [t, d]
        chessBoard(tr + s, tc + s, tr + s, tc + s, s, d)
    print(board)  # debug trace of the board after each level
# 画出已经摆好的棋盘
def drawboard(canvas1, board, colors, startx=50, starty=50, cellwidth=50):
width = 2 * startx + len(board) * cellwidth
height = 2 * starty + len(board) * cellwidth
canvas1.config(width=width, height=height) # 布置画布
for i in range(len(board)):
for j in range(len(board)):
tindex = board[i][j][0]
if tindex == 0:
color = 'white' # 特殊方格显示为白色
else:
tcolor = (0 + board[i][j][1] * 60, 230 - board[i][j][1] * 35, 180 - board[i][j][1] * 25)
# color = colors[tindex % len(colors)] # 为了间隔开颜色
color = Rgb_To_Hex(tcolor)
cellx = startx + j * 50
celly = starty + i * 50
canvas1.create_rectangle(cellx, celly, cellx + cellwidth, celly + cellwidth, fill=color,
outline="#000000") # 画方格
canvas1.create_text(cellx + cellwidth / 2, celly + cellwidth / 2, text=str(tindex))
canvas1.update()
global title
title = 1
# 分步绘制棋盘
def drawOneByOne(canvas1, board, colors, startx=50, starty=50, cellwidth=50):
width = 2 * startx + len(board) * cellwidth
height = 2 * starty + len(board) * cellwidth
canvas1.config(width=width, height=height) # 布置画布
global index
for i in range(len(board)):
for j in range(len(board)):
tindex = board[i][j][0]
cellx = startx + j * 50
celly = starty + i * 50
canvas1.create_rectangle(cellx, celly, cellx + cellwidth, celly + cellwidth, fill='White',
outline="black") # 画方格
color = ""
if tindex == 0:
color = 'white' # 特殊方格显示为白色
cellx = startx + j * 50
celly = starty + i * 50
canvas1.create_rectangle(cellx, celly, cellx + cellwidth, celly + cellwidth, fill=color,
outline="black") # 画方格
canvas1.create_text(cellx + cellwidth / 2, celly + cellwidth / 2, text=str(tindex))
elif index + 1 >= tindex:
tcolor = (0 + board[i][j][1] * 50, 250 - board[i][j][1] * 40, 154 - board[i][j][1] * 25)
# color = colors[tindex % len(colors)] # 为了间隔开颜色
color = Rgb_To_Hex(tcolor)
cellx = startx + j * 50
celly = starty + i * 50
canvas1.create_rectangle(cellx, celly, cellx + cellwidth, celly + cellwidth, fill=color, outline="black") # 画方格
canvas1.create_text(cellx + cellwidth / 2, celly + cellwidth / 2, text=str(tindex))
canvas1.update()
index += 1
# 直接画出最后结果
def drawAll():
n = int(var1.get())
row = int(var2.get())
col = int(var3.get())
global board
board = [[[0, 0] for i in range(n)] for j in range(n)]
chessBoard(0, 0, row, col, n)
tmp_root = tk.Tk()
tmp_root.title('图')
window_tmp = tk.Canvas(tmp_root, width=600, height=600)
window_tmp.pack()
drawboard(window_tmp, board, colors, 50, 50, 50)
# 开始逐步显示
def drawOne():
drawOneByOne(window, board, colors, 50, 200, 50)
# 结束分步显示,并清空index和图像
def closeWin():
tempBoard.clear()
window.delete(ALL)
global index
index = 0
print('delete')
n = (var1.get())
global board
board = [[[0, 0] for i in range(n)] for j in range(n)]
# RGB格式颜色转化为16进制格式颜色
def Rgb_To_Hex(rgb):
# 元组
color = "#"
for i in rgb:
num = int(i)
color += str(hex(num))[-2:].replace('x', '0').upper()
print(color)
return color
def draw(tmpWin, tmpB, colors):
    """Render board tmpB on canvas tmpWin (stepping logic below is disabled)."""
    global index
    # if index < len(tempBoard) - 1:
    #     print(index)
    #     index += 1
    #     drawboard(tmpWin, tmpB, colors, 50, 200, 50)
    # else:
    #     print("index超出")
    print(index)
    drawboard(tmpWin, tmpB, colors, 50, 200, 50)
    # index += 1
# index += 1
if __name__ == '__main__':
    # Build the main window: three labelled entry fields and four buttons.
    root = tk.Tk()
    root.title("棋盘覆盖")
    window = tk.Canvas(root, width=340, height=300)
    window.pack()
    # Tk string variables backing the three entry fields.
    # Board size (expected to be a power of two).
    var1 = tk.StringVar()
    # Row of the special square.
    var2 = tk.StringVar()
    # Column of the special square.
    var3 = tk.StringVar()
    tk.Label(root, text='请输入棋盘规格: ').place(x=15, y=10)
    inputEntity = tk.Entry(root, textvariable=var1)
    inputEntity.place(x=130, y=10)
    tk.Label(root, text='请输入特殊方格所处行号: ').place(x=15, y=40)
    inputEntity1 = tk.Entry(root, textvariable=var2)
    inputEntity1.place(x=170, y=40)
    tk.Label(root, text='请输入特殊方格所处列号: ').place(x=15, y=70)
    inputEntity2 = tk.Entry(root, textvariable=var3)
    inputEntity2.place(x=170, y=70)
    button1 = tk.Button(root, text='显示最后结果', command=drawAll)
    button1.place(x=20, y=130)
    index = 0
    button2 = tk.Button(root, text='分步显示', command=drawOne)
    button2.place(x=150, y=130)
    button3 = tk.Button(root, text='结束分步', command=closeWin)
    button3.place(x=240, y=130)
    # NOTE: button4 is created but never placed, so it is not visible.
    button4 = tk.Button(root, text='清空', command=lambda x=ALL: window.delete(x))
    root.mainloop()
| [
"13567172019@163.com"
] | 13567172019@163.com |
f88cea3cbe1f7d17bf699718b33c7716b846d5b1 | cb2db9d7f3e4f5e965b149dec720153b75abd74f | /VisualizerGraph.py | f0f5bf4aa4de4038d391316c79e24026d3be9e1d | [] | no_license | OshiniBandara/MaximumFlow | 5142875ca98b4456172e630addf2f4eb47b4174e | c2086a21e29f437fadb97533ec0e9f21930adbe0 | refs/heads/master | 2022-02-07T17:27:51.619368 | 2019-05-12T08:15:17 | 2019-05-12T08:15:17 | 186,228,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,899 | py | #w1673661 - UOW no
#2017063 - IIT student no
#Name - chandanam bandara (uow name)
# oshini ruksala bandara
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, draw, show
class Visualizer:
    """Render a flow network (given as an adjacency matrix) with networkx.

    mtrx     -- adjacency/capacity matrix of the network to draw
    fN       -- matplotlib figure number to draw into
    len, wid -- matrix dimensions (NOTE: these parameter names shadow builtins)
    xyl      -- optional precomputed x/y layout; a spring layout is used if omitted
    org_mtrx -- original capacity matrix (before Ford-Fulkerson), used to tell
                residual flow entries apart from plain capacities
    res      -- True when mtrx is a residual network
    """
    def __init__(self, mtrx, fN, len, wid, xyl=None,org_mtrx=None, res=False):
        # Build a directed networkx graph straight from the matrix.
        self.fG = nx.to_networkx_graph(mtrx, create_using=nx.DiGraph);
        # Select the matplotlib figure to draw into.
        self.plotFN(fN)
        self.mtrx = mtrx
        # Node positions; computed below when no layout was supplied.
        self.p = None
        self.res = res
        self.org_mtrx = org_mtrx
        if xyl is None:
            # No layout given: derive one from the graph structure.
            self.createL()
        else:
            self.p = xyl
        # Attach capacity/flow attributes to every edge.
        self.setEC(len, wid)
        # (capacity, flow) text label for each edge.
        eLables = self.createEL()
        # Per-node colour values (uniform; all nodes drawn the same).
        val = self.createNV()
        nx.draw_networkx_edge_labels(self.fG, self.p, edge_labels=eLables)
        nx.draw_networkx_labels(self.fG, self.p)
        nx.draw_networkx_edges(self.fG, self.p, arrows=True)
        nx.draw(self.fG, self.p, node_color=val, node_size=400, edge_cmap=plt.cm.Reds)
    def createEL(self):
        """Return {(src, dst): (capacity, flow)} labels for every edge."""
        eLables = {}
        for s_node, d_node, dictionary in self.fG.edges(data=True):
            eLables[(s_node, d_node)] = (dictionary['capacity'], dictionary['flow'])
        return eLables;
    def createNV(self):
        """Return a uniform colour value (1.0) for every node."""
        val = [1.0 for node in self.fG.nodes()]
        return val
    def plotFN(self, fN):
        """Activate matplotlib figure number fN."""
        plt.figure(fN)
    def plotShow(self):
        """Display all drawn figures."""
        plt.show()
    def createL(self):
        """Compute a spring layout for the graph and store it in self.p."""
        self.p = nx.spring_layout(self.fG)
    def setEC(self, len, wid):
        """Annotate every positive matrix entry with 'flow'/'capacity' edge data.

        An entry that is positive here but zero in the original matrix is a
        back edge created by the flow algorithm, so it is recorded as flow;
        its capacity is taken from the reverse direction of the original
        matrix.  All other positive entries are plain capacities with zero
        flow.
        """
        for nd in range(wid):
            for i in range(len):
                if self.mtrx[nd][i] > 0:
                    # Positive now, zero originally => this entry is flow.
                    if (self.org_mtrx is not None and self.mtrx[nd][i] > 0 and self.org_mtrx[nd][ i] == 0):
                        self.fG[nd][i]['flow'] = self.mtrx[nd][i]
                        self.fG[nd][i]['capacity'] = self.org_mtrx[i][nd]
                    else:
                        self.fG[nd][i]['flow'] = 0
                        self.fG[nd][i]['capacity'] = self.mtrx[nd][i]
    def get_fG(self):
        """Return the underlying networkx graph."""
        return self.fG
    def setL(self, p):
        """Replace the stored node layout."""
        self.p = p
#getting layout
def getL(self):
#returning flow layout
return self.p | [
"noreply@github.com"
] | OshiniBandara.noreply@github.com |
55c6c6db5b0558e9e768acc37f3b1ee25d259e53 | 6b4e02f52ac60a1b102595f8d941f4cc9afa58df | /section6/11. 수들의 조합.py | ddcae53818de9bdbcca1a55ddc4d53c604798e52 | [] | no_license | jioniy/algoritm-problem-solving | f6c525ab2df1c921c09c4191cdf5439f302013ba | 7684eabc0c24095717bf84598a2282e9b76f46e4 | refs/heads/main | 2023-07-24T11:45:34.907772 | 2021-08-30T02:50:36 | 2021-08-30T02:50:36 | 383,208,502 | 1 | 0 | null | 2021-07-05T16:52:24 | 2021-07-05T16:52:23 | null | UTF-8 | Python | false | false | 410 | py | import sys
sys.stdin=open("input.txt", "r")
def DFS(L,v):
global cnt
if L==k:
if sum(res)%m==0:
cnt+=1
else:
for i in range(v,n):
res[L]=a[i]
DFS(L+1,i+1)
res[L]=0
if __name__=="__main__":
n,k=map(int, input().split())
a=list(map(int, input().split()))
res=[0]*k
m=int(input())
cnt=0
DFS(0,0)
print(cnt)
| [
"jw010208@gmail.com"
] | jw010208@gmail.com |
daa4cea0f0df03fd4be44c08441d82e845be1513 | 4063dc7fbd0fbb15e6ccb4c79dfa199010c0c52b | /authentication/views.py | 5cbce77abc9358a37a2c1dbb048d0a8ea6e6ab8c | [] | no_license | kritikaarora/JwtAuth | a790c0bc37fabf4e601c5d00fefce6df65de797d | 1ecd8f5ead5a16325a3cbfd26dda2ae042ecd5b6 | refs/heads/master | 2022-11-28T18:07:44.694391 | 2020-08-14T08:17:42 | 2020-08-14T08:17:42 | 287,477,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | import jwt
from django.shortcuts import render
from rest_framework.generics import GenericAPIView
from .serialzers import UserSerializer,LoginSerializer
from rest_framework.response import Response
from rest_framework import status
from django.conf import settings
from django.contrib import auth
# Create your views here.
class RegisterView(GenericAPIView):
    """POST endpoint: validate the submitted data and create a new user."""
    serializer_class=UserSerializer
    def post(self,request):
        serializer=UserSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data,status=status.HTTP_201_CREATED)
        # Validation failed: echo the field errors back to the client.
        return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)
class LoginView(GenericAPIView):
    """POST endpoint: check credentials and return the user plus a JWT."""
    serializer_class=LoginSerializer
    def post(self,request):
        data=request.data
        username=data.get('username','')
        password=data.get('password','')
        user = auth.authenticate(username=username, password=password)
        if user:
            # Token payload carries only the username, signed with the
            # project-level secret.  NOTE(review): no expiry claim is set,
            # so these tokens never expire -- confirm this is intended.
            auth_token=jwt.encode(
                {'username': user.username}, settings.JWT_SECRET_KEY)
            serializer = UserSerializer(user)
            data = {'user': serializer.data, 'token': auth_token}
            return Response(data, status=status.HTTP_200_OK)
        # SEND RES
return Response({'detail': 'Invalid credentials'}, status=status.HTTP_401_UNAUTHORIZED) | [
"kritika.arora@manprax.com"
] | kritika.arora@manprax.com |
bf4713f9f8ffaef272587b1760977a819cbcf0cc | 1b23d40cb792e348971c8f1464883fd5ac538fc8 | /jpeg.py | 7ce2a29a542e5d542c6b7093f434fb889233a7fd | [] | no_license | yoshi-corleone/img-metadata | 42b5e10be3e53c4bf752c1e2eca16daf717173d8 | c80073098b4af8f0ad6da7fc04c4c18e001bded7 | refs/heads/master | 2022-12-10T02:33:55.248148 | 2018-02-15T14:08:29 | 2018-02-15T14:08:29 | 121,592,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | import struct
from exif import Exif
class Jpeg:
    """Minimal JPEG segment parser.

    Extracts image dimensions, color mode, and a handful of EXIF tags
    (camera maker/model, GPS coordinates, original timestamp) by walking
    the marker/segment structure of a JPEG byte buffer.
    """
    # Current read position within the buffer being parsed.
    __offset = 0
    # Marker constants as pairs of single bytes (the form struct.unpack_from
    # with format "2c" produces).
    __SOI = (b'\xFF', b'\xD8')   # Start Of Image
    __APP1 = (b'\xFF', b'\xE1')  # APP1 segment — carries EXIF data
    __SOS = (b'\xFF', b'\xDA')   # Start Of Scan — entropy-coded data follows
    __EOI = (b'\xFF', b'\xD9')   # End Of Image
    # All SOFn (Start Of Frame) marker variants; whichever appears carries
    # the image dimensions and channel count.
    __SOFs = [
        (b'\xFF', b'\xC0'),
        (b'\xFF', b'\xC1'),
        (b'\xFF', b'\xC2'),
        (b'\xFF', b'\xC3'),
        (b'\xFF', b'\xC5'),
        (b'\xFF', b'\xC6'),
        (b'\xFF', b'\xC7'),
        (b'\xFF', b'\xC9'),
        (b'\xFF', b'\xCA'),
        (b'\xFF', b'\xCB'),
        (b'\xFF', b'\xCD'),
        (b'\xFF', b'\xCE'),
        (b'\xFF', b'\xCF'),
    ]

    @staticmethod
    def can_parse(data):
        """Return True if *data* starts with the JPEG SOI marker (FF D8)."""
        magic_number = struct.unpack_from("2c", data)
        return magic_number == (b'\xFF', b'\xD8')

    def parse(self, jpeg):
        """Walk the segment list of *jpeg* (a bytes buffer) and return a dict.

        Possible keys: "width", "height", "mode", plus any EXIF tags found
        ("maker", "model", "latitude", "longitude", "DateTimeOriginal").
        """
        # Skip the 2-byte SOI marker.
        self.__offset = 2
        result = {}
        while True:
            segment_marker = struct.unpack_from("2c", jpeg, self.__offset)
            self.__offset += 2
            # Stop at Start Of Scan (compressed data, no more metadata
            # segments) or End Of Image.
            if segment_marker == self.__SOS:
                break
            if segment_marker == self.__EOI:
                break
            # Big-endian 16-bit segment length (includes its own 2 bytes),
            # so advancing by it lands on the next marker.
            segment_length = struct.unpack_from(">H", jpeg, self.__offset)[0]
            for frame_header_marker in self.__SOFs:
                if segment_marker == frame_header_marker:
                    # +3 skips the 2-byte length and the 1-byte sample
                    # precision field of the SOF segment.
                    (height, width, channels) = struct.unpack_from(">HHB", jpeg, self.__offset + 3)
                    result["width"] = width
                    result["height"] = height
                    result["mode"] = self.__get_color_mode(channels)
                    break
            if segment_marker == self.__APP1:
                # APP1 payload begins with the ASCII magic "Exif\0\0" when
                # it carries EXIF metadata (APP1 can also hold XMP).
                app1_magic = struct.unpack_from("6c", jpeg, self.__offset + 2)
                if app1_magic == (b'\x45', b'\x78', b'\x69', b'\x66', b'\x00', b'\x00'):
                    # +8 positions the Exif helper at the start of the TIFF
                    # header inside the APP1 payload.
                    exif_parser = Exif(jpeg, self.__offset + 8, segment_length)
                    # (tag id, output key) pairs to look up.
                    tags = [
                        (271, "maker"),
                        (272, "model"),
                        (2, "latitude"),
                        (4, "longitude"),
                        (36867, "DateTimeOriginal")
                    ]
                    for tag in tags:
                        try:
                            value = exif_parser.search_tag(jpeg, target_tag=tag[0], clear_offset=True)
                            result[tag[1]] = value
                        except ValueError:
                            # Tag absent from this file — simply omit the key.
                            pass
            self.__offset += segment_length
        return result

    def __get_color_mode(self, channels):
        """Map a SOF channel count to a human-readable color-mode name."""
        if channels == 1:
            return "Grayscale"
        elif channels == 3:
            return "RGB"
        elif channels == 4:
            return "CMYK"
        else:
            return "Unknown"
"yoshi.corleone@gmail.com"
] | yoshi.corleone@gmail.com |
cabf056914706d3bbbca0316a583ad7fc2476ecd | 81b91aa7700b4a20bc2fce0d44e4c71edefe20b2 | /djangoEX/forestapps/employee/migrations/0001_initial.py | d4a549f1ee2911a08d588e4c09e5afaccb317b05 | [] | no_license | prapanpong/git | 41ac4ead2024b1d08a477bee32f67bdf37198d8f | 7b703511d3cb8caffbcf46e2aa1765dc9f1fda9d | refs/heads/master | 2020-03-29T22:50:25.131740 | 2018-09-28T09:52:23 | 2018-09-28T09:52:23 | 150,443,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # Generated by Django 2.1.1 on 2018-09-28 06:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``employee`` table for the Employee model."""
    initial = True
    # No prior migrations to depend on — this is the app's first.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('eid', models.CharField(max_length=20)),
                ('ename', models.CharField(max_length=100)),
                ('econtact', models.CharField(max_length=15)),
            ],
            options={
                # Explicit table name instead of the default app_model name.
                'db_table': 'employee',
            },
        ),
    ]
| [
"noreply@github.com"
] | prapanpong.noreply@github.com |
e5b9a7fc5ea38892a56d547e0046589d05ae570b | 916295e4a542279b3c0af9f26ec9dba6ff536b49 | /pytorch-start/pytorch/tutorials/beginner_source/blitz/neural_networks_tutorial.py | 5ddc0260a98c1dcfa38e373fc994ebfc2a135e2e | [
"BSD-3-Clause"
] | permissive | thu-skyworks/vision-mission | 0f94251ee4c967a6e1fd7175c4cf95346c863fe7 | 45a6eb2972b4c877be88c3fe065e638e622f5917 | refs/heads/master | 2021-05-06T18:01:39.301444 | 2017-12-23T13:53:53 | 2017-12-23T13:53:53 | 111,934,517 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,463 | py | # -*- coding: utf-8 -*-
"""
Neural Networks
===============
Neural networks can be constructed using the ``torch.nn`` package.
Now that you had a glimpse of ``autograd``, ``nn`` depends on
``autograd`` to define models and differentiate them.
An ``nn.Module`` contains layers, and a method ``forward(input)``\ that
returns the ``output``.
For example, look at this network that classfies digit images:
.. figure:: /_static/img/mnist.png
:alt: convnet
convnet
It is a simple feed-forward network. It takes the input, feeds it
through several layers one after the other, and then finally gives the
output.
A typical training procedure for a neural network is as follows:
- Define the neural network that has some learnable parameters (or
weights)
- Iterate over a dataset of inputs
- Process input through the network
- Compute the loss (how far is the output from being correct)
- Propagate gradients back into the network’s parameters
- Update the weights of the network, typically using a simple update rule:
``weight = weight - learning_rate * gradient``
Define the network
------------------
Let’s define this network:
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style convolutional classifier.

    Expects input of shape (N, 1, 32, 32) and produces 10 class scores:
    two conv+ReLU+max-pool stages followed by three fully connected layers.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 input channel -> 6 -> 16 feature maps,
        # each convolution using a 5x5 kernel.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16 maps of 5x5 (after pooling) -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run the forward pass and return raw (un-softmaxed) class scores."""
        feats = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # A square pooling window may be given as a single number.
        feats = F.max_pool2d(F.relu(self.conv2(feats)), 2)
        flat = feats.view(-1, self.num_flat_features(feats))
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Return the number of elements per sample, ignoring the batch dim."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
# Instantiate the network; printing an nn.Module shows its layer summary.
net = Net()
print(net)
########################################################################
# You just have to define the ``forward`` function, and the ``backward``
# function (where gradients are computed) is automatically defined for you
# using ``autograd``.
# You can use any of the Tensor operations in the ``forward`` function.
#
# The learnable parameters of a model are returned by ``net.parameters()``
# Collect all learnable parameters (weights and biases of every layer).
params = list(net.parameters())
print(len(params))
print(params[0].size())  # conv1's .weight
########################################################################
# The input to the forward is an ``autograd.Variable``, and so is the output.
# Note: Expected input size to this net(LeNet) is 32x32. To use this net on
# MNIST dataset,please resize the images from the dataset to 32x32.
# Single random 32x32 grayscale image (batch of 1).
# NOTE: the name ``input`` shadows the Python builtin — kept as in the tutorial.
input = Variable(torch.randn(1, 1, 32, 32))
out = net(input)
print(out)
########################################################################
# Zero the gradient buffers of all parameters and backprops with random
# gradients:
# Clear accumulated gradients, then backprop a random upstream gradient.
net.zero_grad()
out.backward(torch.randn(1, 10))
########################################################################
# .. note::
#
# ``torch.nn`` only supports mini-batches The entire ``torch.nn``
# package only supports inputs that are a mini-batch of samples, and not
# a single sample.
#
# For example, ``nn.Conv2d`` will take in a 4D Tensor of
# ``nSamples x nChannels x Height x Width``.
#
# If you have a single sample, just use ``input.unsqueeze(0)`` to add
# a fake batch dimension.
#
# Before proceeding further, let's recap all the classes you’ve seen so far.
#
# **Recap:**
# - ``torch.Tensor`` - A *multi-dimensional array*.
# - ``autograd.Variable`` - *Wraps a Tensor and records the history of
# operations* applied to it. Has the same API as a ``Tensor``, with
# some additions like ``backward()``. Also *holds the gradient*
# w.r.t. the tensor.
# - ``nn.Module`` - Neural network module. *Convenient way of
# encapsulating parameters*, with helpers for moving them to GPU,
# exporting, loading, etc.
# - ``nn.Parameter`` - A kind of Variable, that is *automatically
# registered as a parameter when assigned as an attribute to a*
# ``Module``.
# - ``autograd.Function`` - Implements *forward and backward definitions
# of an autograd operation*. Every ``Variable`` operation, creates at
# least a single ``Function`` node, that connects to functions that
# created a ``Variable`` and *encodes its history*.
#
# **At this point, we covered:**
# - Defining a neural network
# - Processing inputs and calling backward.
#
# **Still Left:**
# - Computing the loss
# - Updating the weights of the network
#
# Loss Function
# -------------
# A loss function takes the (output, target) pair of inputs, and computes a
# value that estimates how far away the output is from the target.
#
# There are several different
# `loss functions <http://pytorch.org/docs/nn.html#loss-functions>`_ under the
# nn package .
# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error
# between the input and the target.
#
# For example:
output = net(input)
# NOTE(review): target has shape (10,) while output is (1, 10); MSELoss
# broadcasts here, and newer PyTorch versions warn — confirm on upgrade.
target = Variable(torch.arange(1, 11))  # a dummy target, for example
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
########################################################################
# Now, if you follow ``loss`` in the backward direction, using it’s
# ``.grad_fn`` attribute, you will see a graph of computations that looks
# like this:
#
# ::
#
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
# -> view -> linear -> relu -> linear -> relu -> linear
# -> MSELoss
# -> loss
#
# So, when we call ``loss.backward()``, the whole graph is differentiated
# w.r.t. the loss, and all Variables in the graph will have their
# ``.grad`` Variable accumulated with the gradient.
#
# For illustration, let us follow a few steps backward:
# Walk a few steps back through the autograd graph from the loss node.
print(loss.grad_fn)  # MSELoss
print(loss.grad_fn.next_functions[0][0])  # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
########################################################################
# Backprop
# --------
# To backpropagate the error all we have to do is to ``loss.backward()``.
# You need to clear the existing gradients though, else gradients will be
# accumulated to existing gradients
#
#
# Now we shall call ``loss.backward()``, and have a look at conv1's bias
# gradients before and after the backward.
net.zero_grad()     # zeroes the gradient buffers of all parameters
# Before backward the bias gradient is zero; after, it holds d(loss)/d(bias).
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
########################################################################
# Now, we have seen how to use loss functions.
#
# **Read Later:**
#
# The neural network package contains various modules and loss functions
# that form the building blocks of deep neural networks. A full list with
# documentation is `here <http://pytorch.org/docs/nn>`_
#
# **The only thing left to learn is:**
#
# - updating the weights of the network
#
# Update the weights
# ------------------
# The simplest update rule used in practice is the Stochastic Gradient
# Descent (SGD):
#
# ``weight = weight - learning_rate * gradient``
#
# We can implement this using simple python code:
#
# .. code:: python
#
# learning_rate = 0.01
# for f in net.parameters():
# f.data.sub_(f.grad.data * learning_rate)
#
# However, as you use neural networks, you want to use various different
# update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
# To enable this, we built a small package: ``torch.optim`` that
# implements all these methods. Using it is very simple:
import torch.optim as optim

# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

# in your training loop: one forward/backward/update step
optimizer.zero_grad()   # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()    # Does the update
| [
"304600203@qq.com"
] | 304600203@qq.com |
dc2bb603f797dcefafc8b6662c41bbb14a89bd90 | 8e9375a8ca4bb333d85d8d89179746abea4cb4a3 | /seloger/seloger/spiders/seloger_spider.py | 02e0e7106a4eddb233cb0763273c0c5b9fedfda6 | [] | no_license | adipasquale/seloger-spider | 2b875b0ca4898cae27f71b038260acad6b0071f7 | a22db298a2486c48d2d3ba753ac31f5046f51985 | refs/heads/master | 2021-01-01T03:48:41.098863 | 2016-05-09T20:45:38 | 2016-05-09T20:45:38 | 57,156,560 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # -*- coding: utf-8 -*-
import scrapy
from seloger.items import Offer
# Boilerplate words stripped from listing titles (French rental terms).
IGNORE_WORDS_IN_TITLE = [
    u"Location", u"Appartement", u"2 pièces", u"appartement F2/T2/2 pièces"
]
class SeLogerSpider(scrapy.Spider):
    """Spider for seloger.com rental-listing detail pages.

    Extracts title, description, image URLs and characteristics from a
    listing page into an ``Offer`` item.
    """
    name = "seloger"
    allowed_domains = ["seloger.com"]
    start_urls = [
        "http://www.seloger.com/annonces/locations/appartement/paris-3eme-75/enfants-rouges/108976823.htm?"
    ]

    def get_meta(self, name, response=None):
        """Return the content of the ``og:<name>`` OpenGraph meta tag.

        Bug fix: the original body referenced an undefined ``response``
        variable, so every call raised NameError. The response is now an
        explicit (defaulted) parameter, keeping the old call signature valid.
        """
        if response is None:
            raise ValueError("a scrapy Response is required")
        return response.xpath('//meta[@property="og:%s"]/@content' % name).extract()[0]

    def parse(self, response):
        """Parse one listing page and yield a populated Offer item."""
        offer = Offer()
        offer["url"] = response.url
        offer["title"] = self.get_meta("title", response=response)
        for word in IGNORE_WORDS_IN_TITLE:
            offer["title"] = offer["title"].replace(word, "")
        # Bug fix: str.strip() returns a new string; the original discarded
        # its result, leaving surrounding whitespace in the title.
        offer["title"] = offer["title"].strip()
        offer["description"] = self.get_meta("description", response=response).strip()
        offer["images"] = response.css('.carrousel_image_small::attr("src")').extract()
        # Drop energy-rating rows (DPE/GES) and empty entries.
        offer["characteristics"] = response.css(".liste__item-switch, .liste__item-float, .liste__item").xpath("text()").extract()
        offer["characteristics"] = [c.strip() for c in offer["characteristics"] if "DPE" not in c and "GES" not in c and c.strip()]
        yield offer
| [
"adrien@drivy.com"
] | adrien@drivy.com |
07691b7c3a4035b4adf4d5074099297a3d2ec4a1 | 39163e95069939fd2a6cf94ebf0a5efc7406828c | /backend/selab/manage.py | 14d7f43745d436f6994bc5eef1c37c017d209096 | [] | no_license | sh1v4mr4j/se_lab | d92a50ccaec9346e8bc17cfc1b34f8d734f6875f | f1932077f1ca3ed454fe72229a20850d3a5c5645 | refs/heads/master | 2022-09-22T23:07:44.469484 | 2020-05-31T05:15:42 | 2020-05-31T05:15:42 | 268,213,663 | 0 | 0 | null | 2020-05-31T05:14:24 | 2020-05-31T05:14:23 | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'selab.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        # Django missing (or the virtualenv not activated): fail with a
        # helpful message, chaining the original ImportError as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from err
    execute_from_command_line(sys.argv)
# Standard script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| [
"utkarshkoushik007@gmail.com"
] | utkarshkoushik007@gmail.com |
90cd1505870730f313d38dffb35a8c1db20d3b74 | 3e18d70eb8d27d971405590e2f43c020308e4e02 | /SouthFilmIndustry/Tollywood/urls.py | aadc5c845981aeecc84f02bb35e2f6a112dd416b | [] | no_license | aravind1910/Django-Projects | 0832712fcbebf292c31ae26c71949cbaa5f63935 | cf28f9d7e5f876281cda89ed95f39cac346a3ec3 | refs/heads/master | 2022-12-25T18:58:48.347038 | 2020-09-23T23:29:30 | 2020-09-23T23:29:30 | 298,117,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from django.urls import path
from Tollywood import views
# URL routes for the Tollywood app: home, registration/list CRUD views,
# and a contact page. Each route name is used for reverse lookups.
urlpatterns=[
    path('',views.index,name="index"),
    path('register/',views.register,name="register"),
    path('showdata/',views.showdata,name="showdata"),
    path('edit/<int:id>',views.edit,name='edit'),
    path('edit2/<int:id>',views.edit2,name='edit2'),
    path('delete/<int:id>',views.delete,name='delete'),
    path('contact/',views.contact,name='contact'),
]
"aravindchintalapudi1910@gmail.com"
] | aravindchintalapudi1910@gmail.com |
90275913f01d0f987295a420d868051d6b52a606 | bfe78647180580c51d37d890530842884c6eef94 | /comma_code.py | 84aef1b30f586d3ae66d34df7e0a7979bc5085b5 | [] | no_license | michaeltu01/personal-py | 9fd688a421af3a49cecfbaa9d24d9c304418cd79 | d46422a0951bb71df57aa51069630cba6c6561d4 | refs/heads/master | 2023-08-23T06:17:01.745534 | 2021-10-16T16:25:51 | 2021-10-16T16:25:51 | 413,210,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | def user_input():
    # Collected items; NOTE: the name shadows the built-in ``list``.
    list = []
    while True:
        item = input("Enter an item into the list (enter 'q' to quit): ")
        # 'q' or 'Q' ends the collection loop.
        if(item == "q" or item == "Q"):
            break
        list.append(item)
    return list
def comma_code(list):
    """Print the items as one comma-separated sentence with a trailing 'and'.

    A single item is printed alone; for several items the last one is
    prefixed with "and" (Oxford-comma style, e.g. "a, b, and c").
    """
    print("\nHere is your list: ")
    size = len(list)
    for index, item in enumerate(list):
        if size == 1:
            print(item)
        elif index == size - 1:
            print("and", item)
        else:
            print(item + ',', end=' ')
# Script entry point: interactively collect items, then pretty-print them.
comma_code(user_input())
"michaeltu705@gmail.com"
] | michaeltu705@gmail.com |
88907f1f362c2ea5fe1d2ff81c18f75fb787a3cf | 9ee5643cfb2d76e6582ef7b985564f5c2afea6d1 | /src/modules/utils/SConscript | 0048cc3920c4595766c099de137e790c3068f967 | [
"BSD-2-Clause"
] | permissive | securesonic/safekiddo-backend | 0f50f644adaf1f93794c8de922bf3f23750e811c | c94db1ee35eebfd2a7faef072802d1bddc789d0f | refs/heads/master | 2021-01-24T21:36:27.499034 | 2018-02-28T11:15:46 | 2018-02-28T11:15:46 | 123,274,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | env = hEnv().Clone()
libs = hSplit("""
boost_thread
boost_date_time
ssl
crypto
""")
hDynamicLib("utils",
libs = libs,
srcs = ["*.cpp"],
env = env
)
| [
"marcin.marzec@safekiddo.com"
] | marcin.marzec@safekiddo.com | |
8eb18f1e7fefdb6d356d52ea5748c6a1d947302a | a0dc0a148e1cd3ebbbcff64699c957ae6dfa5635 | /python/A_Byte_Of_Python/try_except.py | 5d1a2cc686a877664e54062b247fb59072d143f4 | [] | no_license | kshitijgupta/all-code | eb4a77e9ec3fef330c852978615fd6c6c27a40f3 | c90a5b23ae4269c72444e8593c90d378e60a6a1c | refs/heads/master | 2020-06-04T22:17:42.563870 | 2012-08-19T14:45:10 | 2012-08-19T14:45:10 | 42,383,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | #!/usr/bin/python
#coding=UTF-8
import sys
# Prompt for one line of input, handling end-of-file gracefully (Python 2).
try:
    s = raw_input('Enter something -->')
except EOFError:
    # raw_input raises EOFError when the user sends EOF (Ctrl-D).
    print '\nWhy did you do an EOF on me?'
    sys.exit()
except:
    # NOTE(review): a bare except also swallows KeyboardInterrupt and
    # SystemExit; ``except Exception:`` would be safer — confirm intent.
    print '\nSome error / exception occurred'
print 'Done'
| [
"luoleicn@b6497320-2115-11df-b9f8-bfe9128881a6"
] | luoleicn@b6497320-2115-11df-b9f8-bfe9128881a6 |
fc96372a7968873980eb61ce5a692d20cca85fe8 | cd659b3b389bd5271ab1eff3867df2a8dd3ec294 | /StudentMgmtSystem/code.py | 26215d312180945bbbdecb6160f2b1b96c26823e | [
"MIT"
] | permissive | Rishil96/ga-learner-dsmp-repo | 2194c195529847bc63cbfaea63e23d8c9f4e04cc | 89dd5f23cf1385c76ca91049e3e9bb13ca330df9 | refs/heads/master | 2020-12-15T09:45:20.294756 | 2020-03-26T07:10:56 | 2020-03-26T07:10:56 | 235,065,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | # --------------
# Code starts here
# Task 1: build the combined class roster.
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']

# Merge both sections into a single roster.
new_class = class_1 + class_2
print(new_class)

# A new student joins the class.
new_class.append('Peter Warden')
print(new_class)

# 'Carla Gentry' (index 5) leaves the class.
del new_class[5]
print(new_class)

# --------------
# Task 2: total marks and overall percentage.
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
# Idiom fix: sum the dict values instead of adding each key by hand.
total = sum(courses.values())
print(total)
# Each course is out of 100, so the maximum is len(courses) * 100
# (replaces the magic constant 500 — same value for five courses).
percentage = total * 100 / (len(courses) * 100)
print(percentage)

# --------------
# Task 3: find the mathematics topper (highest score).
# NOTE: 'Yoshua Benjio' spelling kept exactly as in the source data.
mathematics = {'Geoffrey Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65,
               'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66,
               'Peter Warden': 75}
topper = max(mathematics, key=mathematics.get)
print(topper)

# --------------
# Given string
topper = 'andrew ng'

# Task 4: certificate name as "LASTNAME FIRSTNAME" in upper case.
# Split once and unpack, instead of calling split() twice.
first_name, last_name = topper.split()
print(first_name)
print(last_name)
full_name = last_name + " " + first_name
print(full_name)
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| [
"Rishil96@users.noreply.github.com"
] | Rishil96@users.noreply.github.com |
93bf25711d87fed54990d2bbd5afc3692eec901e | 880245e37bb8d92fe04b30c0f995fa03d4dcedce | /round2/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x_b1_time.py | 182509b8d4acff1ede350e9e5525fb5106b2c6df | [
"Apache-2.0"
] | permissive | feifei-Liu/mmdetection_guangdong | 425354ae7e845d165fc2b747c95f6660fb588a96 | 7f912251e00c7101f607f87f1db99c1fa9b533cd | refs/heads/master | 2020-12-26T18:44:06.161233 | 2019-10-14T02:40:49 | 2019-10-14T02:40:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,162 | py | # model settings
# Faster R-CNN detector (mmdetection config): ResNeXt-101 32x4d backbone
# with deformable convolutions on stages C3-C5, FPN neck, RPN + shared-FC
# bbox head.
model = dict(
    type='FasterRCNN',
    pretrained=None,
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        # Deformable convolution settings, applied to stages 2-4 only
        # (see stage_with_dcn below).
        dcn=dict(
            modulated=False,
            groups=32,
            deformable_groups=1,
            fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    # RPN with a wide range of anchor aspect ratios (0.1 .. 10.0),
    # presumably to match elongated defects — confirm against the dataset.
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 10.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=2, # set small class , test time
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
# Training hyper-parameters for RPN assignment/sampling, proposal
# generation, and the R-CNN head.
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
# Inference settings: fewer proposals than training, plain NMS at IoU 0.5.
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=900)
    # soft-nms is also supported for rcnn testing
    # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
    )
# dataset settings
# Dataset configuration for the cloth-defect detection task (round 2).
dataset_type = 'MyDataset_defect_round2'
data_root = '/home/zhangming/Models/Results/cloth_flaw_detection/Datasets_2/'
# ImageNet RGB mean/std normalization.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='MinMaxIoURandomCrop',
        min_ious=(0, 1, 0.8, 0.85, 0.91, 0.93, 0.95), # a sampled 1 returns the image uncropped
        min_crop_size=0.3,
        max_crop_size=0.50,
        ),
    dict(type='Resize', img_scale=(1024, 425), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Custom flag: split each test image into a 2x2 grid of patches.
test_crop = dict(flag=True, patch_size = (2,2))
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        # img_scale=[(1024, 425), (1280, 532)], # multi-scale testing
        #img_scale=[(2048,900),(1960,861)], # multi-scale testing
        img_scale=(2048,900), # multi-scale testing
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'layout/crop_train_925.json',
        img_prefix=data_root + 'new_defect/crop_defect/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'layout/crop_val_925.json',
        img_prefix=data_root + 'new_defect/crop_defect/',
        pipeline=test_pipeline),
    # Test paths are filled in at runtime by the evaluation script.
    test=dict(
        type=dataset_type,
        ann_file=None,
        img_prefix=None,
        pipeline=test_pipeline))
# optimizer
# Optimizer: SGD with momentum and gradient clipping.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: step decay at epochs 8 and 11 with a linear warmup.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x_b1_time'
# Fine-tune from a COCO-pretrained checkpoint of the same architecture.
load_from = '/home/zhangming/work/kaggle/mmdetection/checkpoints/faster_rcnn_dconv_c3-c5_x101_32x4d_fpn_1x_20190201-6d46376f.pth'
resume_from = None
# workflow = [('train', 1), ('val', 1)]
workflow = [('train', 1)]
| [
"ZM@120"
] | ZM@120 |
c4b15b9fdc71ce9b58ce624174cf96fda9b7b7b6 | 30eb4b75f665fc88695f021ce0ebfbf19faedbe6 | /tests/sentry/integrations/gitlab/test_repository.py | f56bc24df07717fa377dcca40bcf2ea98b1a3184 | [
"BSD-2-Clause"
] | permissive | ufosky-server/sentry | 3b4ca82308be8139b580171417c4188a4f6cdce3 | eda0cc478e93bd8a7b060f7f0e21b86734e3405a | refs/heads/master | 2020-04-03T19:41:51.115904 | 2018-10-31T08:55:41 | 2018-10-31T08:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,328 | py | from __future__ import absolute_import
import responses
import pytest
from exam import fixture
from django.core.urlresolvers import reverse
from sentry.integrations.exceptions import IntegrationError
from sentry.integrations.gitlab.repository import GitlabRepositoryProvider
from sentry.models import (
Identity,
IdentityProvider,
Integration,
Repository,
CommitFileChange
)
from sentry.testutils import PluginTestCase
from sentry.utils import json
from .testutils import (
COMPARE_RESPONSE,
COMMIT_LIST_RESPONSE,
COMMIT_DIFF_RESPONSE
)
# Set of valid values for the CommitFileChange ``type`` field, derived from
# the model's declared choices (each choice is a (value, label) pair).
commit_file_type_choices = {c[0] for c in CommitFileChange._meta.get_field('type').choices}
class GitLabRepositoryProviderTest(PluginTestCase):
provider_name = 'integrations:gitlab'
def setUp(self):
responses.reset()
super(GitLabRepositoryProviderTest, self).setUp()
self.login_as(self.user)
self.integration = Integration.objects.create(
provider='gitlab',
name='Example GitLab',
external_id='example.gitlab.com:getsentry',
metadata={
'instance': 'example.gitlab.com',
'domain_name': 'example.gitlab.com/getsentry',
'verify_ssl': False,
'base_url': 'https://example.gitlab.com',
'webhook_secret': 'secret-token-value',
}
)
identity = Identity.objects.create(
idp=IdentityProvider.objects.create(
type='gitlab',
config={},
external_id='1234567890',
),
user=self.user,
external_id='example.gitlab.com:4',
data={
'access_token': '1234567890',
}
)
self.integration.add_organization(self.organization, self.user, identity.id)
self.integration.get_provider().setup()
self.default_repository_config = {
'path_with_namespace': 'getsentry/example-repo',
'name_with_namespace': 'Get Sentry / Example Repo',
'path': 'example-repo',
'id': '123',
'web_url': 'https://example.gitlab.com/getsentry/projects/example-repo',
}
self.gitlab_id = 123
@fixture
def provider(self):
return GitlabRepositoryProvider('gitlab')
def create_repository(self, repository_config, integration_id, organization_slug=None):
responses.add(
responses.GET,
u'https://example.gitlab.com/api/v4/projects/%s' % self.gitlab_id,
json=repository_config
)
responses.add(
responses.POST,
u'https://example.gitlab.com/api/v4/projects/%s/hooks' % self.gitlab_id,
json={'id': 99}
)
with self.feature({'organizations:repos': True}):
response = self.client.post(
path=reverse(
'sentry-api-0-organization-repositories',
args=[organization_slug or self.organization.slug]
),
data={
'provider': self.provider_name,
'installation': integration_id,
'identifier': repository_config['id'],
}
)
return response
def assert_repository(self, repository_config, organization_id=None):
instance = self.integration.metadata['instance']
external_id = u'{}:{}'.format(instance, repository_config['id'])
repo = Repository.objects.get(
organization_id=organization_id or self.organization.id,
provider=self.provider_name,
external_id=external_id
)
assert repo.name == repository_config['name_with_namespace']
assert repo.url == repository_config['web_url']
assert repo.integration_id == self.integration.id
assert repo.config == {
'instance': instance,
'path': repository_config['path_with_namespace'],
'project_id': repository_config['id'],
'webhook_id': 99,
}
@responses.activate
def test_create_repository(self):
response = self.create_repository(self.default_repository_config, self.integration.id)
assert response.status_code == 201
self.assert_repository(self.default_repository_config)
@responses.activate
def test_create_repository_verify_payload(self):
def request_callback(request):
payload = json.loads(request.body)
assert 'url' in payload
assert payload['push_events']
assert payload['merge_requests_events']
expected_token = u'{}:{}'.format(self.integration.external_id,
self.integration.metadata['webhook_secret'])
assert payload['token'] == expected_token
return (201, {}, json.dumps({'id': 99}))
responses.add_callback(
responses.POST,
u'https://example.gitlab.com/api/v4/projects/%s/hooks' % self.gitlab_id,
callback=request_callback
)
response = self.create_repository(self.default_repository_config, self.integration.id)
assert response.status_code == 201
self.assert_repository(self.default_repository_config)
def test_create_repository_null_installation_id(self):
response = self.create_repository(self.default_repository_config, None)
assert response.status_code == 500
def test_create_repository_integration_does_not_exist(self):
integration_id = self.integration.id
self.integration.delete()
response = self.create_repository(self.default_repository_config, integration_id)
assert response.status_code == 500 # TODO(lb): shouldn't this result in a 404?
def test_create_repository_org_given_has_no_installation(self):
organization = self.create_organization(owner=self.user)
response = self.create_repository(
self.default_repository_config,
self.integration.id,
organization.slug)
assert response.status_code == 500
@responses.activate
def test_create_repository_get_project_request_fails(self):
responses.add(
responses.GET,
u'https://example.gitlab.com/api/v4/projects/%s' % self.gitlab_id,
status=503,
)
response = self.create_repository(self.default_repository_config, self.integration.id)
# TODO(lb): it gives a 400 which I'm not sure makes sense here
assert response.status_code == 400
@responses.activate
def test_create_repository_integration_create_webhook_failure(self):
responses.add(
responses.POST,
u'https://example.gitlab.com/api/v4/projects/%s/hooks' % self.gitlab_id,
status=503,
)
response = self.create_repository(self.default_repository_config,
self.integration.id)
assert response.status_code == 400
@responses.activate
def test_on_delete_repository_remove_webhook(self):
response = self.create_repository(self.default_repository_config,
self.integration.id)
responses.reset()
responses.add(
responses.DELETE,
'https://example.gitlab.com/api/v4/projects/%s/hooks/99' % self.gitlab_id,
status=204
)
repo = Repository.objects.get(pk=response.data['id'])
self.provider.on_delete_repository(repo)
assert len(responses.calls) == 1
@responses.activate
def test_on_delete_repository_remove_webhook_missing_hook(self):
response = self.create_repository(self.default_repository_config,
self.integration.id)
responses.reset()
responses.add(
responses.DELETE,
'https://example.gitlab.com/api/v4/projects/%s/hooks/99' % self.gitlab_id,
status=404
)
repo = Repository.objects.get(pk=response.data['id'])
self.provider.on_delete_repository(repo)
assert len(responses.calls) == 1
@responses.activate
def test_compare_commits_start_and_end(self):
    """With both SHAs given, compare_commits should hit GitLab's compare
    endpoint and fetch a per-commit diff for each commit it returns."""
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/compare?from=abc&to=xyz' % self.gitlab_id,
        json=json.loads(COMPARE_RESPONSE)
    )
    # One diff request per commit in the canned compare response.
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/12d65c8dd2b2676fa3ac47d955accc085a37a9c1/diff' % self.gitlab_id,
        json=json.loads(COMMIT_DIFF_RESPONSE)
    )
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/8b090c1b79a14f2bd9e8a738f717824ff53aebad/diff' % self.gitlab_id,
        json=json.loads(COMMIT_DIFF_RESPONSE)
    )
    response = self.create_repository(self.default_repository_config,
                                      self.integration.id)
    repo = Repository.objects.get(pk=response.data['id'])
    commits = self.provider.compare_commits(repo, 'abc', 'xyz')
    assert 2 == len(commits)
    for commit in commits:
        assert_commit_shape(commit)
@responses.activate
def test_compare_commits_start_and_end_gitlab_failure(self):
    """A GitLab outage (502) during compare should raise IntegrationError."""
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/compare?from=abc&to=xyz' % self.gitlab_id,
        status=502
    )
    response = self.create_repository(self.default_repository_config,
                                      self.integration.id)
    repo = Repository.objects.get(pk=response.data['id'])
    with pytest.raises(IntegrationError):
        self.provider.compare_commits(repo, 'abc', 'xyz')
@responses.activate
def test_compare_commits_no_start(self):
    """Without a start SHA, the provider should look up the end commit's
    timestamp and list commits up until that time, then diff each one."""
    # End-commit lookup supplies the `until` timestamp...
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/xyz' % self.gitlab_id,
        json={'created_at': '2018-09-19T13:14:15Z'}
    )
    # ...which is then used to page the commit list.
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits?until=2018-09-19T13:14:15Z' % self.gitlab_id,
        json=json.loads(COMMIT_LIST_RESPONSE)
    )
    # One diff request per commit in the canned list response.
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/ed899a2f4b50b4370feeea94676502b42383c746/diff' % self.gitlab_id,
        json=json.loads(COMMIT_DIFF_RESPONSE)
    )
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/6104942438c14ec7bd21c6cd5bd995272b3faff6/diff' % self.gitlab_id,
        json=json.loads(COMMIT_DIFF_RESPONSE)
    )
    response = self.create_repository(self.default_repository_config,
                                      self.integration.id)
    repo = Repository.objects.get(pk=response.data['id'])
    commits = self.provider.compare_commits(repo, None, 'xyz')
    for commit in commits:
        assert_commit_shape(commit)
@responses.activate
def test_compare_commits_no_start_gitlab_failure(self):
    """A failing end-commit lookup (502) should raise IntegrationError."""
    responses.add(
        responses.GET,
        'https://example.gitlab.com/api/v4/projects/%s/repository/commits/abc' % self.gitlab_id,
        status=502
    )
    response = self.create_repository(self.default_repository_config,
                                      self.integration.id)
    repo = Repository.objects.get(pk=response.data['id'])
    with pytest.raises(IntegrationError):
        self.provider.compare_commits(repo, None, 'abc')
def assert_commit_shape(commit):
    """Assert that *commit* carries every field Sentry expects, and that
    each entry of its patch set has a known file-change type and a path."""
    for field in ('id', 'repository', 'author_email', 'author_name',
                  'message', 'timestamp', 'patch_set'):
        assert commit[field]
    for patch in commit['patch_set']:
        assert patch['type'] in commit_file_type_choices
        assert patch['path']
| [
"noreply@github.com"
] | ufosky-server.noreply@github.com |
c3fd9209357e7beb607c2433b8948c60c5a6c32f | 0980f758e30bfc2705f868ea976b4ff4b079efad | /apis/migrations/0001_initial.py | f66e26177103d45a217da8bceed3b8b04280864a | [] | no_license | Manpreetcse1212/tim-clone-using-django | 24cd0a95d1ce73059810e73d304178735e0897c6 | d0546fb25f623f4c3a0f1d9850b38344d58b33f9 | refs/heads/main | 2023-07-01T23:11:41.291332 | 2021-08-07T15:29:15 | 2021-08-07T15:29:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # Generated by Django 3.2.5 on 2021-07-13 23:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) migration: creates the ``Simple`` table
    with an auto primary key and a single ``test`` char column."""

    # First migration for this app; no prior state to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Simple',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('test', models.CharField(max_length=100)),
            ],
        ),
    ]
| [
"ashikpatel.doof@gmail.com"
] | ashikpatel.doof@gmail.com |
f8dc351458feac6bfdf774ff619eb8dedf0f9e91 | 199fd917fc148893aae5c495ce1e142fc2973d78 | /Monte Carlo/Freivalds .py | 92985be31587dfe119a601cbf2eae09a8129d600 | [] | no_license | quan5609/Randomzied-Algorithms | 7d145a78de66e7257cbd788f987ea87eeb5f578d | e0da9930dc8b0333d0f87fbe51151b67b85ce00e | refs/heads/master | 2022-11-21T23:27:55.465077 | 2020-07-19T09:01:34 | 2020-07-19T09:01:34 | 280,786,513 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!/usr/bin/env python3
import random
import numpy as np
import optparse
import os
import sys
def freivalds(A, B, C):
r = np.random.randint(0, 2, size=(2))
P = np.dot(A, np.dot(B, r)) - np.dot(C, r)
print(P)
if not np.any(P):
return True
return False
def readCommand(argv):
parser = optparse.OptionParser(
description='Number of trials')
parser.add_option('--trials',
dest='trials',
default=5)
(options, _) = parser.parse_args(argv)
return options
def printMat(mat):
print(mat)
def main():
A = np.random.randint(0, 5, size=(2, 2))
B = np.random.randint(0, 5, size=(2, 2))
C = np.random.randint(0, 5, size=(2, 2))
options = readCommand(sys.argv)
[printMat(mat)for mat in [A, B, C]]
trials = int(options.trials)
for _ in range(trials):
if not freivalds(A, B, C):
return "C != AxB"
return "C probably == AxB"
if __name__ == "__main__":
print(main())
| [
"quan.buicompscibk@hcmut.edu.vn"
] | quan.buicompscibk@hcmut.edu.vn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.