blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bed8fdd79205932c1f16505cfd9077aa45156f68 | 2d9a17e2b896d2f6a90913a4ba02d41f0ede5dd0 | /_58job/page_store.py | ab4764e39b62286c71dc816045dbe148722d6785 | [] | no_license | wolfwhoami/xxxxx | 1cf2ed2c8ed78048d87cccf2953ca86c0871a783 | 670787ec71127bc05c1645cc3d8ef7c3a91fe84b | refs/heads/master | 2020-03-30T00:44:55.864817 | 2016-12-16T01:45:03 | 2016-12-16T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
from spider.ipin.savedb import PageStoreBase
from spider.runtime import Log
from spider.util import htmlfind
from spider.util import TimeHandler
import spider
import time
import re
class Jd58PageStore(PageStoreBase):
    """Page store for 58.com job-detail (JD) pages, persisted under 'jd_58job'.

    PageStoreBase supplies get_cur_doc(), whose cur_url / cur_content describe
    the page currently being saved. Python 2 module (uses `unicode`).
    """
    def __init__(self):
        # Register this store under the 'jd_58job' channel name.
        super(Jd58PageStore, self).__init__('jd_58job')

    def extract_content(self):
        """Return the job-description HTML fragment, or None when it cannot be parsed."""
        # The description lives in <div class="posMsg borb">.
        content = htmlfind.findTag(self.get_cur_doc().cur_content, 'div', 'posMsg borb')
        try:
            # Strip the wrapping tag from the first match; raises
            # (e.g. IndexError) when findTag matched nothing.
            content = htmlfind.remove_tag(content[0], 1)
        except:
            # Keep a binary dump of the raw page for offline debugging, then
            # report failure to the caller.
            Log.errorbin("invalid jd content %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
            return None
        return content

    def page_time(self):
        """Return the page's publish time (epoch milliseconds for the "now" branch;
        presumably getTimeOfNDayBefore matches that unit -- confirm).

        Raises when the header tag is missing or its text matches no known pattern.
        """
        tag = htmlfind.findTag(self.get_cur_doc().cur_content, 'ul', 'class="headTag"')
        try:
            tag = htmlfind.remove_tag(tag[0], 1)
        except:
            Log.errorbin("invalid jd pubtime %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
            raise
        # Python 2: normalize to a UTF-8 byte string before the substring test.
        if isinstance(tag, unicode):
            tag = tag.encode('utf-8')
        if "天前" not in tag:
            # No "N days ago" marker: treat the page as published right now.
            return int(time.time() * 1000)
        else:
            # First captured number is the day count; the other two groups are
            # captured but never used -- presumably leftovers, confirm before removing.
            find = re.search('(\d+).*?(\d+).*?(\d+)', tag, re.S)
            if find:
                day = find.group(1)
                return TimeHandler.getTimeOfNDayBefore(day)
            raise Exception("not copy time pattern: {}".format(tag))
def check_should_fetch(self, jobid):
if not super(Jd58PageStore, self).check_should_fetch(jobid):
return False
return True | [
"jianghao@ipin.com"
] | jianghao@ipin.com |
1c3803d5dbc897cd9558e917667e0a262d021045 | 7cd283590c0bf5cd76394969948ac5fc7cc717d4 | /biblioGest/settings.py | d872f5560bcad2d67c24699d28aec1486c8c6aa6 | [] | no_license | vhsreturns/bibliogest | f95fca7fcda6c81521d1c96c5557c1c75a1ce726 | 9c9e3b64b3435162395da307382c206cc059a6d3 | refs/heads/master | 2020-03-22T20:44:21.391213 | 2018-07-11T21:25:22 | 2018-07-11T21:25:22 | 140,627,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,507 | py | """
Django settings for biblioGest project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'n(6h#+vfux4!1!#ea65zv!b(#f*_@d89j=r)r#hgu%br!-w#%*'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'gestion.apps.GestionConfig',  # the project's own app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'biblioGest.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'biblioGest.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/

LANGUAGE_CODE = 'es'

TIME_ZONE = 'Europe/Madrid'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/

STATIC_URL = '/static/'

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home2/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/' | [
"manuel.humanescabrera@gmail.com"
] | manuel.humanescabrera@gmail.com |
fd7b7ddd03394c9ab63bba77aeff6549fdb9d0bc | cd971dd65f2684682a1f6e9d198b1b650e08181a | /Server/MessageParser.py | 4ca261b75c8e1c0482eec62dd2316a11b1bea26a | [] | no_license | noamg97/ResearchProject | 1b24eaf213b5fd5bc312861c6863f503daf68aa8 | c57d349c426665fad12a188a27e0c572d2fac31d | refs/heads/master | 2021-03-12T22:31:42.018206 | 2015-01-26T22:43:49 | 2015-01-26T22:43:49 | 21,600,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,486 | py | from OpCodes import *
# Shared handles injected by init(); every handler below reads these.
# NOTE(review): users_sockets starts as a list but is indexed by username
# (and tested with `in`) everywhere below -- callers appear to inject a
# dict-like map of username -> socket wrapper; confirm against the server.
db = None
users_sockets = []


def init(_db, _users_sockets):
    """Store the database handle and the username -> socket map for this module."""
    global db, users_sockets
    db = _db
    users_sockets = _users_sockets
#TODO: validate all the variables that are received from user
def parse(msg, username):
    """Route one raw client message to the handler for its opcode.

    The first num_char characters of msg carry the opcode (a constant from
    OpCodes); the remainder is the payload handed to the handler. A message
    whose opcode matches nothing is silently ignored, exactly like the
    original elif chain.
    """
    dispatch = (
        (user_state_changed, parse_user_state_changed),
        (connect_to_friend, parse_user_connecting_to_friend),
        (profile_data_changed, parse_user_profile_data_changed),
        (friend_request, parse_friend_request),
        (friend_request_accepted, parse_friend_request_accepted),
        (friend_request_declined, parse_friend_request_declined),
    )
    # First matching opcode wins, mirroring the previous if/elif ordering.
    for opcode, handler in dispatch:
        if msg[:num_char] == opcode:
            handler(msg[num_char:], username)
            break
def parse_user_state_changed(data, username):
    """Persist `username`'s new presence state and broadcast it to friends.

    `data` is the textual state code sent by the client; elsewhere in this
    module a state of 0 is treated as offline.
    """
    global db, users_sockets
    print 'user ' + str(username) + ' is now ' + str(data)
    state = int(data)
    db.set_field(username, 'state', state)
    # Mirror the state onto the live socket wrapper; KeyError means the user
    # currently has no connected socket, which is fine.
    try: users_sockets[username].state = state
    except KeyError: pass
    frd_list = db.get_list_from_field(username, 'friends_list')
    for friend in frd_list:
        try:
            # Notify each online friend; offline friends (no socket) are skipped.
            users_sockets[friend].send(send_state_changed + str(username) + ',' + str(state))
        except KeyError: pass
def parse_user_connecting_to_friend(data, username):
    """Broker a direct connection between `username` and one of their friends.

    Each side needs at least one spare "sleeping" socket; use_sleeping()
    appears to pop one and return its (ip, port), which is forwarded to the
    other peer so the two can connect to each other directly.
    """
    global db, users_sockets
    friend_username = data.strip()
    print 'user ' + username + ' starts connecting to ' + friend_username
    frd_list = db.get_list_from_field(username, 'friends_list')
    if friend_username in frd_list:
        # state != 0 means the friend is online (state is written by
        # parse_user_state_changed above).
        if int(db.get_fields(friend_username, 'state')[0][0]) != 0:
            if any(users_sockets[username].sleeping_sockets) and any(users_sockets[friend_username].sleeping_sockets):
                usr_ip, usr_port = users_sockets[username].use_sleeping()
                frnd_ip, frnd_port = users_sockets[friend_username].use_sleeping()
                # Tell each side the other's endpoint.
                users_sockets[username].send(send_friend_connecting + friend_username + ',' + frnd_ip + ',' + str(frnd_port))
                users_sockets[friend_username].send(send_friend_connecting + username + ',' + usr_ip + ',' + str(usr_port))
            else: print 'either ' + username + ' or ' + friend_username + " don't have a connected sleeping socket"
        else: print friend_username + ' is offline'
    else: print friend_username + ' not on ' + username + "'s friends list."
def parse_user_profile_data_changed(data, username):
    """Stub handler: profile-data updates are received but not yet processed."""
    pass
def parse_friend_request(data, username):
    """Forward a friend request from `username` to `friend_username`.

    The request is recorded under the sender's 'sent_friend_requests' so a
    later accept/decline can be validated. Delivery is immediate when the
    target is online (state != 0), otherwise it is queued in the target's
    'queued_messages'.
    """
    global db, users_sockets
    friend_username = data.strip()
    print 'user ' + username + ' sent a friend request to user ' + friend_username
    if db.does_user_exist(friend_username):
        frd_list = db.get_list_from_field(username, 'friends_list')
        if friend_username not in frd_list:
            db.append_to_field(username, 'sent_friend_requests', friend_username)
            # state != 0 -> target currently online.
            if int(db.get_fields(friend_username, 'state')[0][0]) != 0:
                users_sockets[friend_username].send(send_friend_request + username)
            else:
                db.append_to_field(friend_username, 'queued_messages', send_friend_request + username)
        else: print friend_username + ' already in ' + username + "'s friends list"
    else: print friend_username + ' does not exist'
def parse_friend_request_accepted(data, username):
global db, users_sockets
friend_username = data.strip()
print 'user ' + username + ' accepted a friend request from user ' + friend_username
if username in db.get_list_from_field(friend_username, 'sent_friend_requests'):
db.append_to_field(username, 'friends_list', friend_username)
db.append_to_field(friend_username, 'friends_list', username)
db.remove_from_field(friend_username, 'sent_friend_requests', username)
users_sockets[username].send(send_state_changed + str(friend_username) + ',' + str(users_sockets[friend_username].state))
if int(db.get_fields(friend_username, 'state')[0][0]) != 0:
users_sockets[friend_username].send(send_friend_request_accepted + username)
if username in users_sockets and friend_username in users_sockets:
users_sockets[friend_username].send(send_state_changed + str(username) + ',' + str(users_sockets[username].state))
else:
db.append_to_field(friend_username, 'queued_messages', send_friend_request_accepted + username)
def parse_friend_request_declined(data, username):
    """Handle `username` declining the friend request `friend_username` sent.

    Removes the outstanding request and notifies the requester -- immediately
    when they are online (state != 0), otherwise via their queued messages.
    """
    global db, users_sockets
    friend_username = data.strip()
    print 'user ' + username + ' declined a friend request from user ' + friend_username
    if username in db.get_list_from_field(friend_username, 'sent_friend_requests'):
        db.remove_from_field(friend_username, 'sent_friend_requests', username)
        if int(db.get_fields(friend_username, 'state')[0][0]) != 0:
            users_sockets[friend_username].send(send_friend_request_declined + username)
        else:
            db.append_to_field(friend_username, 'queued_messages', send_friend_request_declined + username)
"noamg97@gmail.com"
] | noamg97@gmail.com |
7f976f5b8142c14de1f5a2d2cbea50a1fe36c512 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/aio/_sync_token_async.py | 9d2441dc438ea9e84f222b0768eefed6c3454998 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,236 | py | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, Dict
from asyncio import Lock
from azure.core.pipeline import PipelineRequest, PipelineResponse
from azure.core.pipeline.policies import SansIOHTTPPolicy
from .._sync_token import SyncToken
class AsyncSyncTokenPolicy(SansIOHTTPPolicy):
    """Pipeline policy that tracks and replays "Sync-Token" headers.

    Tokens announced by the service on responses are cached per token id,
    keeping only the highest sequence number seen, and are attached, comma
    separated, to the ``Sync-Token`` header of every outgoing request.
    """

    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
        self._sync_token_header = "Sync-Token"
        self._sync_tokens: Dict[str, Any] = {}
        self._lock = Lock()

    async def on_request(self, request: PipelineRequest) -> None:  # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
        """Attach every cached sync token to the outgoing request.

        :param request: The PipelineRequest object.
        :type request: ~azure.core.pipeline.PipelineRequest
        """
        async with self._lock:
            header_value = ",".join(str(token) for token in self._sync_tokens.values())
            if header_value:
                request.http_request.headers.update({self._sync_token_header: header_value})

    async def on_response(self, request: PipelineRequest, response: PipelineResponse) -> None:  # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
        """Fold any sync tokens announced on the response into the cache.

        :param request: The PipelineRequest object.
        :type request: ~azure.core.pipeline.PipelineRequest
        :param response: The PipelineResponse object.
        :type response: ~azure.core.pipeline.PipelineResponse
        """
        header_value = response.http_response.headers.get(self._sync_token_header)
        if not header_value:
            return
        for raw_token in header_value.split(","):
            await self._update_sync_token(SyncToken.from_sync_token_string(raw_token))

    async def add_token(self, full_raw_tokens: str) -> None:
        """Merge an externally supplied comma-separated token string into the cache."""
        for raw_token in full_raw_tokens.split(","):
            await self._update_sync_token(SyncToken.from_sync_token_string(raw_token))

    async def _update_sync_token(self, sync_token: SyncToken) -> None:
        """Store ``sync_token`` unless a newer token for the same id is cached."""
        if not sync_token:
            return
        async with self._lock:
            cached = self._sync_tokens.get(sync_token.token_id, None)
            if not cached or cached.sequence_number < sync_token.sequence_number:
                self._sync_tokens[sync_token.token_id] = sync_token
| [
"noreply@github.com"
] | noreply@github.com |
b5886d532cad889faed1c4b86e4e617731f1e256 | 7b33b61ee640a0813d69c17346b41ce9216bde36 | /venv/Scripts/alembic-script.py | e9482c6d084ec24dbd0439d06767aba18aa6148c | [] | no_license | Mayank0010/Contact_Form | 630e2085553ab37445fa7e161fbc1498728691c3 | 27ea044fdc6aa24f2c47360ab6b0c20ad230bd3b | refs/heads/main | 2023-03-22T13:28:02.882800 | 2021-03-14T18:09:55 | 2021-03-14T18:09:55 | 345,152,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | #!"c:\users\mayank kumar singh\desktop\contact_form\venv\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'alembic==1.5.6','console_scripts','alembic'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'alembic==1.5.6'  # dependency pin recorded by the installer
# Resolve an entry-point loader, newest mechanism first: stdlib
# importlib.metadata (3.8+), then the importlib_metadata backport, and
# finally pkg_resources' load_entry_point as a last resort.
try:
    from importlib.metadata import distribution
except ImportError:
    try:
        from importlib_metadata import distribution
    except ImportError:
        from pkg_resources import load_entry_point


def importlib_load_entry_point(spec, group, name):
    """Load the entry point `group`/`name` from the distribution named in `spec`."""
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    return next(matches).load()


# Only used when pkg_resources did not already provide load_entry_point above.
globals().setdefault('load_entry_point', importlib_load_entry_point)


if __name__ == '__main__':
    # Make argv[0] look like the console-script name (strip "-script.py(w)"
    # or ".exe"), then run alembic's CLI and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('alembic==1.5.6', 'console_scripts', 'alembic')())
| [
"kr.mayank.singh@gmail.com"
] | kr.mayank.singh@gmail.com |
79c41c532063cf3e353701cbe49e18bd227a3312 | 6146661de4e644ae9ec55df883f3a16479766486 | /mydatabase/migrations/0004_auto__add_seo_optimizacija.py | 7b29fe45ac1f380d99d74123a0b4736ee45df739 | [] | no_license | zainabladan/Free-Django-Template | 25ecf1bab06c9603aadde8dd4598562a9121eed0 | ba099d16c7e251298f3c4ded472e16ad716b0d80 | refs/heads/master | 2020-03-24T17:29:37.358667 | 2015-01-08T01:11:15 | 2015-01-08T01:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration 0004: introduce the SEO_OPTIMIZACIJA table.

    Auto-generated by South. The frozen `models` snapshot below describes the
    whole app's ORM state at the time the migration was created and should
    not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: create the mydatabase_seo_optimizacija table."""
        # Adding model 'SEO_OPTIMIZACIJA'
        db.create_table(u'mydatabase_seo_optimizacija', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('meta_naslov', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('meta_opis', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.CharField')(default='/Unesi-URL', max_length=256)),
        ))
        db.send_create_signal(u'mydatabase', ['SEO_OPTIMIZACIJA'])

    def backwards(self, orm):
        """Revert: drop the mydatabase_seo_optimizacija table."""
        # Deleting model 'SEO_OPTIMIZACIJA'
        db.delete_table(u'mydatabase_seo_optimizacija')

    # Frozen ORM snapshot consulted by South when running this migration.
    models = {
        u'mydatabase.answers': {
            'Meta': {'object_name': 'Answers'},
            'answer': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'questions': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mydatabase.Questions']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
            'users': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mydatabase.Users']"})
        },
        u'mydatabase.pages': {
            'Meta': {'object_name': 'Pages'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'})
        },
        u'mydatabase.questions': {
            'Meta': {'object_name': 'Questions'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'solved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
            'users': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mydatabase.Users']"})
        },
        u'mydatabase.seo_optimizacija': {
            'Meta': {'object_name': 'SEO_OPTIMIZACIJA'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'meta_naslov': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'meta_opis': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'default': "'/Unesi-URL'", 'max_length': '256'})
        },
        u'mydatabase.users': {
            'Meta': {'object_name': 'Users'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'banuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'zaporka': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }
complete_apps = ['mydatabase'] | [
"blaz1988@gmail.com"
] | blaz1988@gmail.com |
0948b4ae310c7e9da2787093b25e1f15545eafdd | 8869a0a73aff6895cd826a1aad88e1a350575b85 | /misc/scripts/05-mapbox_upload.py | 0aba10988251913bc3fd84ca32dc25cc3d7e99c9 | [] | no_license | pondrejk/dizzer | 500fe26dc32660b865bad91fa0f1321a7a86da22 | c18e072eebdc607f6d3ec5938c9846910e8d179c | refs/heads/master | 2023-08-31T02:44:38.771194 | 2021-10-14T19:39:07 | 2021-10-14T19:39:07 | 120,441,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | #!/bin/python3
'''
Uploading json data to Mapbox
Token must be set in environment
export MAPBOX_ACCESS_TOKEN="pk.YOUR_ACCESS_TOKEN"
'''
import argparse
import os
import time
import glob
from concurrent import futures
from mapbox import Uploader
from time import sleep
# Parse arguments.
parser = argparse.ArgumentParser(description='Upload mbtiles')  # typo 'Upolad' fixed
# NOTE(review): the files globbed below are *.mbtiles, not JSON -- this help
# text looks stale; confirm before changing it further.
parser.add_argument('indir', type=os.path.abspath, help='Input dir with JSONs')
args = parser.parse_args()

service = Uploader()
# FIX: this statement previously used '==' (a no-op comparison) instead of
# '=', so the token was never actually assigned here; uploads only worked
# because the mapbox SDK also reads MAPBOX_ACCESS_TOKEN from the environment.
# Assign it explicitly, as the module docstring intends.
service.session.params['access_token'] = os.environ['MAPBOX_ACCESS_TOKEN']
def upload(srcfile):
    """Upload <args.indir>/<srcfile>.<extension> to Mapbox under map id `srcfile`.

    A response with HTTP status 422 is treated as transient and retried up to
    five times, five seconds apart. After the final attempt the status is not
    checked again, so a persistent 422 is silently given up on. Relies on the
    module-level `args`, `extension` and `service`.
    """
    mapid = srcfile
    path = "{0}/{1}.{2}".format(args.indir, srcfile, extension)
    print("Processing {}".format(srcfile))
    with open(path, 'rb') as src:
        upload_resp = service.upload(src, mapid)
    if upload_resp.status_code == 422:
        for i in range(5):
            sleep(5)
            # Re-open for each attempt so the upload restarts from the beginning.
            with open(path, 'rb') as src:
                upload_resp = service.upload(src, mapid)
            if upload_resp.status_code != 422:
                break
def upload_many(filenames):
    """Upload every named tileset in parallel worker processes; return the count."""
    with futures.ProcessPoolExecutor() as pool:
        pending = pool.map(upload, filenames)
    # The pool has fully shut down here, so all results are ready to drain.
    return sum(1 for _ in pending)
def main(upload_many):
    """Run the batch upload over the module-level `filenames` and report timing."""
    start = time.time()
    count = upload_many(filenames)
    print('{} upload(s) in {:.2f} s'.format(count, time.time() - start))
if __name__ == "__main__":
    # Work from the input directory so glob yields bare file names.
    os.chdir(args.indir)
    extension = 'mbtiles' # not needed if extension is not set
    # Map ids are the file names minus everything after the first dot.
    # NOTE(review): split(".")[0] truncates names containing extra dots
    # (e.g. "a.b.mbtiles" -> "a"); os.path.splitext would preserve them.
    filenames = [i.split(".")[0] for i in glob.glob('*.{}'.format(extension))]
    main(upload_many)
| [
"pondrejk@redhat.com"
] | pondrejk@redhat.com |
7a0fa4194830d4ae22f4b0a727ae929deed45f70 | 6fb93f19907d316b29e4958d518011c756709f4a | /docs/conf.py | e41af191c285c6bab365cbe6a8bd31a6a8dfe8dd | [
"MIT"
] | permissive | maplewen4/Kubo_Demo | 6bbca648017d8a36e6e4424e807b1744e8947a51 | cff2cd7fb704d0fb7b25bd0e4d10b1e88c12dcbe | refs/heads/master | 2020-11-25T14:27:28.538446 | 2019-12-18T00:09:26 | 2019-12-18T00:09:26 | 228,715,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,325 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

# Incase the project was not installed
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# Imported so autodoc/autosummary can resolve the package from the checkout.
import kubo_demo_bowen


# -- Project information -----------------------------------------------------

project = 'Kubo_Demo_Bowen'
copyright = ("2019, BowenHan. Project structure based on the "
             "Computational Molecular Science Python Cookiecutter version 1.1")
author = 'BowenHan'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
# NOTE(review): version/release are left empty; consider deriving them from
# the package metadata so built docs show a real version number.
release = ''


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autosummary',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
]

autosummary_generate = True
# NumPy-style docstrings (Google style disabled below).
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'kubo_demo_bowendoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'kubo_demo_bowen.tex', 'Kubo_Demo_Bowen Documentation',
     'kubo_demo_bowen', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'kubo_demo_bowen', 'Kubo_Demo_Bowen Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'kubo_demo_bowen', 'Kubo_Demo_Bowen Documentation',
     author, 'kubo_demo_bowen', 'ai',
     'Miscellaneous'),
]


# -- Extension configuration -------------------------------------------------
| [
"maplewen4@gmail.com"
] | maplewen4@gmail.com |
b48c0e6f402cf036746dd9bedc2f16bb2edb1917 | cb9a67787d5c65ba64defa230d236477f6e5c4ae | /0x04-python-more_data_structures/101-square_matrix_map.py | 084d959f17cad5be0fc7baeb4cacc5bedffd3981 | [] | no_license | Christabel951/alx-higher_level_programming | f26cc28dc1d7dedf5ddb7235a1b2cdd0cb3d97ca | 27f76e423d13237ddd14363847f92d0c229ca06b | refs/heads/main | 2023-09-03T07:08:53.163283 | 2021-11-19T01:49:38 | 2021-11-19T01:49:38 | 403,781,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | #!/usr/bin/python3
def square_matrix_map(matrix=[]):
    """Return a new matrix whose cells are the squares of `matrix`'s cells.

    Built purely with map(); the input matrix is never mutated (which is why
    the mutable default argument, kept for interface compatibility, is safe).
    """
    def _square_row(row):
        return list(map(lambda cell: cell ** 2, row))
    return list(map(_square_row, matrix))
| [
"crkomo@gmail.com"
] | crkomo@gmail.com |
9e324c68cdb2ba2b6ab9ff3d81f154edf35182cb | a2c7cca7692df98e93ca276c8133d838cae76c6c | /autostock_script.py | 902584eb2f3128c6db10440a884f8b755bfa9d0e | [] | no_license | itsmuzzle/LKQ-Dashboard-Exporter | 969c736403a26e701e2f90071a441d62d71442b3 | ba56487a3067830c132137a0d4b423fb28a2cca0 | refs/heads/master | 2020-05-18T15:07:12.826528 | 2019-05-01T22:26:47 | 2019-05-01T22:26:47 | 184,489,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import credentials
import requests
import time
# Dashboard properties
# All values come from the `login` dict in the local credentials module,
# which keeps secrets (username, password, URLs) out of this script.
dashboard_login_url = credentials.login['dashboard_login_url']
dashboard_search_url = credentials.login['dashboard_search_url']
dashboard_username = credentials.login['dashboard_username']
dashboard_password = credentials.login['dashboard_password']
products_to_search = credentials.login['products_to_search']
date_search_from = credentials.login['date_search_from']
date_search_until = credentials.login['date_search_until']
path_to_chrome_driver = credentials.login['path_to_chrome_driver']
def download_data():
    """Log in to the dashboard with Selenium and export one CSV per
    configured product over the configured date range."""
    browser = webdriver.Chrome(path_to_chrome_driver)
    browser.maximize_window()
    browser.implicitly_wait(5)
    # Authenticate first: the export pages require a logged-in session.
    browser.get(dashboard_login_url)
    browser.find_element_by_class_name("user").send_keys(dashboard_username)
    browser.find_element_by_class_name("pass").send_keys(dashboard_password)
    browser.find_element_by_class_name("loginButton").click()
    # One search + CSV export per product for the configured date window.
    for product_code in products_to_search:
        time.sleep(5)
        search_url = dashboard_search_url + f"{product_code}/{date_search_from}/{date_search_until}"
        browser.get(search_url)
        browser.implicitly_wait(5)
        browser.find_element_by_link_text('Export').click()
        # Dismiss the confirmation modal that pops up after the export.
        browser.find_element_by_xpath("//*[@id=\"modalSuccess\"]/div/div/div[3]/button").click()
    browser.quit()
    print("Mission completed!")
# Script entry point: the export runs immediately when this file executes.
download_data()
# TODO: combine csv's with pandas | [
"89t0asty@gmail.com"
] | 89t0asty@gmail.com |
f16414d6757f76e3f3127e9cb22d944b90774b0f | c0da8d95836c661ab5ce4ef62837650c4402b8d2 | /pull_data.py | 8fda0031df6876afedec172d0c77c28c2da176ad | [] | no_license | wuwentian/encrypted_traffic | ca3e74cd2b71ed07e4a565a24d81f49a180bddb1 | 3e464328183d750b188bd54a02ad0d1c58d4704c | refs/heads/master | 2020-09-29T08:46:07.056972 | 2019-12-10T01:35:06 | 2019-12-10T01:35:06 | 227,003,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,383 | py | '''
*
* Copyright (c) 2016 Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Cisco Systems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
'''
from data_parser import DataParser
import os
class Pull:
    """Collect per-flow feature vectors from directories of capture files.

    Every directory passed as a keyword argument is scanned with DataParser
    and each parsed flow becomes one row in ``self.data`` with a numeric
    label appended to ``self.labels``.

    ``types`` selects which feature groups are concatenated per flow
    (sizes taken from the num_params accounting below):
        0 - per-flow metadata (7 values)
        1 - packet lengths (3600 values, or 100 when ``compact``)
        2 - inter-packet times (900 values, or 100 when ``compact``)
        3 - byte distribution (256 values)
        4 - TLS info (186 values)
    """
    def __init__(self, types=[0], compact=1, max_files=[None,None], **kwargs):
        # NOTE(review): mutable defaults (lists) are shared across calls;
        # harmless here because they are never mutated, but fragile.
        self.num_params = 0
        self.types = types
        self.compact = compact
        self.data = []
        self.labels = []
        # Accumulate the expected feature-vector length for the chosen
        # feature groups; rows of any other length are dropped later.
        for t in self.types:
            if t == 0:
                self.num_params += 7
            elif t == 1 and self.compact == 0:
                self.num_params += 3600
            elif t == 1 and self.compact == 1:
                self.num_params += 100
            elif t == 2 and self.compact == 0:
                self.num_params += 900
            elif t == 2 and self.compact == 1:
                self.num_params += 100
            elif t == 3:
                self.num_params += 256
            elif t == 4:
                self.num_params += 186
        # "neg_dir" is required and gets label 0.0; every remaining kwarg
        # directory gets label index+1 in dict (insertion) order.
        # NOTE(review): max_files[1] caps the negative set, max_files[0]
        # caps each positive set; any failure (e.g. missing neg_dir) is
        # swallowed and only printed.
        try:
            self.load_data(kwargs["neg_dir"], 0.0, max_files[1])
            del kwargs["neg_dir"]
            for index, arg in enumerate(kwargs):
                self.load_data(kwargs[arg], index+1, max_files[0])
        except Exception as e:
            print("error get data", e)
        # if neg_dir != None:
        #     self.load_data(neg_dir,0.0, max_files[1])
        # if pos_dir != None:
        #     self.load_data(pos_dir,1.0, max_files[0])
    def load_data(self, idir, label, max_files):
        """Parse every file in ``idir`` and append its flows to self.data.

        ``idir`` must end with a path separator (files are located via
        plain concatenation ``idir + f``). ``max_files`` limits how many
        files are *parsed successfully* before stopping; None means all.
        Unparseable files are skipped with a message.
        """
        files = os.listdir(idir)
        num_files = 0
        for f in files:
            try:
                dParse = DataParser(idir + f, self.compact)
            except:
                print ('Error: failued to parse file %s' % (idir + f))
                continue
            num_files += 1
            # One list per feature group; each is indexed per flow below.
            tmpTLS = dParse.getTLSInfo()
            tmpBD = dParse.getByteDistribution()
            tmpIPT = dParse.getIndividualFlowIPTs()
            tmpPL = dParse.getIndividualFlowPacketLengths()
            tmp = dParse.getIndividualFlowMetadata()
            if tmp != None and tmpPL != None and tmpIPT != None:
                for i in range(len(tmp)):
                    # Concatenate the selected feature groups for flow i.
                    tmp_data = []
                    if 0 in self.types:
                        tmp_data.extend(tmp[i])
                    if 1 in self.types:
                        tmp_data.extend(tmpPL[i])
                    if 2 in self.types:
                        tmp_data.extend(tmpIPT[i])
                    if 3 in self.types:
                        tmp_data.extend(tmpBD[i])
                    if 4 in self.types:
                        tmp_data.extend(tmpTLS[i])
                    # Drop malformed rows instead of feeding ragged data
                    # to downstream learners.
                    if len(tmp_data) != self.num_params:
                        continue
                    self.data.append(tmp_data)
                    self.labels.append(label)
            if max_files != None and num_files >= max_files:
                break
| [
"root@dgx-gpu-1.ai"
] | root@dgx-gpu-1.ai |
5cf3f4cd9f1a9c51c8b2da485c1e7bb561a7d943 | 59d5bcd552663e0f66d46920486cb3ddf1f543a9 | /comic_scraper/comic_scraper/middlewares.py | 73e718b8ea3111128083a2fe1c22283c10608ad4 | [] | no_license | kalbers33/webcomicbot | e82e46fdef5711b374c5e707802ecc943b60d533 | 0ba78cf40af5d025d2b4644fa772cca5777fba41 | refs/heads/master | 2020-07-01T12:45:17.085591 | 2019-05-27T21:52:32 | 2019-05-27T21:52:32 | 201,179,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class ComicScraperSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ComicScraperDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"kalbers33@gmail.com"
] | kalbers33@gmail.com |
8bb1441d28ef0efd6e85f47948d918493bee10f1 | 39a2783aad61a2e8d7e1263477a50d3e2abb0b6d | /anju_pro/__init__.py | 1c9e58f6107c10e3fc4e1b9cea7f7681ca5eab13 | [] | no_license | Big-data-spider/anju | e0eb87699c9af21cafd07e50967b7a3bf9cc5fb9 | b3e2eb97da05db85aaf24d1314ad7cef233e7f89 | refs/heads/master | 2020-03-21T07:32:45.179212 | 2018-08-02T10:03:27 | 2018-08-02T10:03:27 | 138,286,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | '__auther__' == 'STC' | [
"stc214@qq.com"
] | stc214@qq.com |
61ffe08a041bf9ab8125c750c6710d2416c6f292 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/__init__.py | 9db90b9b03d86d82c89d8808869b01b69cf370e7 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,549 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import protocol_stats
class ldp_protocol_stats_instance_since_clear(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/statistics/ldp-protocol-stats-instance-since-clear. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__protocol_stats',)
_yang_name = 'ldp-protocol-stats-instance-since-clear'
_rest_name = 'ldp-protocol-stats-instance-since-clear'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
def _get_protocol_stats(self):
"""
Getter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
YANG Description: protocol stats rx/tx
"""
return self.__protocol_stats
def _set_protocol_stats(self, v, load=False):
"""
Setter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_stats is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_stats() directly.
YANG Description: protocol stats rx/tx
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protocol_stats must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__protocol_stats = t
if hasattr(self, '_set'):
self._set()
def _unset_protocol_stats(self):
self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
protocol_stats = __builtin__.property(_get_protocol_stats)
_pyangbind_elements = {'protocol_stats': protocol_stats, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
91577320a6ad2fab7a30f0640acbdbcf621586e1 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-enumeration-3-5.py | 41c46ea0f9f6ec5c12819d5834a5ba585aeda8a2 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 717 | py | from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3Type
# Generated NIST conformance instance: a list-of-NMTOKEN element whose value
# is pinned to one enumeration member of the schema-derived type.
obj = NistschemaSvIvListNmtokenEnumeration3(
    value=NistschemaSvIvListNmtokenEnumeration3Type.IDENTIFY_THE_FURTHERMORE_PARTNERS_VERSIONS_TO_TECHNOL_THAT_COMMERCE_D_FROM_FRAMEWORKS_WOULD_PA_SAME_FIVE_SIMULATION_COMPLEX_OASIS_TO_THE_NAVAL_DATA_IN_AROMA_DESCRIPTION_BASE_EC_RECOMMEN_SOME_THESE_TOOLS_CO_RELATED
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
ae59f02eab72110000b74d8503fae65c3fc36ecd | e164fd9dce5fef093f85ca009f78570ec2b1c492 | /324. Wiggle Sort II.py | c63081d423ce9f82a653401f08c2dc5fb6ed93ff | [] | no_license | havenshi/leetcode | 58fde93a1f1cbdd3c2faa9566c00383e5812f3a7 | bcb79f329bcb133e6421db8fc1f4780a4eedec39 | refs/heads/master | 2021-01-22T04:15:23.748793 | 2019-11-30T04:25:54 | 2019-11-30T04:25:54 | 92,447,327 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | # Sorting and reoder solution. (92ms)
class Solution(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Sort ascending, then interleave: even slots take the lower half
        # reversed, odd slots take the upper half reversed; reversing keeps
        # equal values as far apart as possible.
        nums.sort()
        # Fix: use floor division — plain "/" yields a float on Python 3
        # and breaks the slice bounds below (identical result on Python 2).
        med = (len(nums) - 1) // 2
        # nums[med::-1] is the lower half reversed; nums[:med:-1] is the
        # upper half reversed.
        nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]
# Time: O(n) ~ O(n^2)
# Space: O(1)
# Tri Partition (aka Dutch National Flag Problem) with virtual index solution. (TLE)
from random import randint
class Solution2(object):
    """O(n) average-time, O(1) extra-space wiggle sort: randomized
    quickselect for the median, then a Dutch-national-flag partition
    addressed through a virtual index mapping.

    Fixes: ``xrange`` (Python 2 only) -> ``range``, and ``/`` -> ``//``
    so the index arithmetic stays integral on Python 3.
    """
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        def findKthLargest(nums, k):
            # Randomized quickselect for the k-th largest value.
            left, right = 0, len(nums) - 1
            while left <= right:
                pivot_idx = randint(left, right)
                new_pivot_idx = partitionAroundPivot(left, right, pivot_idx, nums)
                if new_pivot_idx == k - 1:
                    return nums[new_pivot_idx]
                elif new_pivot_idx > k - 1:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < k - 1.
                    left = new_pivot_idx + 1

        def partitionAroundPivot(left, right, pivot_idx, nums):
            # Move every element strictly greater than the pivot in front
            # of it; return the pivot's final index.
            pivot_value = nums[pivot_idx]
            new_pivot_idx = left
            nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
            for i in range(left, right):  # was xrange (Python 2 only)
                if nums[i] > pivot_value:
                    nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                    new_pivot_idx += 1
            nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
            return new_pivot_idx

        def reversedTriPartitionWithVI(nums, val):
            # Dutch-flag partition through a virtual index so values > val
            # land on odd slots and values < val on even slots.
            def idx(i, N):
                return (1 + 2 * (i)) % N
            N = len(nums) // 2 * 2 + 1  # integer division (Python 3 safe)
            i, j, n = 0, 0, len(nums) - 1
            while j <= n:
                if nums[idx(j, N)] > val:
                    nums[idx(i, N)], nums[idx(j, N)] = nums[idx(j, N)], nums[idx(i, N)]
                    i += 1
                    j += 1
                elif nums[idx(j, N)] < val:
                    nums[idx(j, N)], nums[idx(n, N)] = nums[idx(n, N)], nums[idx(j, N)]
                    n -= 1
                else:
                    j += 1

        mid = (len(nums) - 1) // 2
        findKthLargest(nums, mid + 1)
        reversedTriPartitionWithVI(nums, nums[mid])
"haiwen.shi01@gmail.com"
] | haiwen.shi01@gmail.com |
22f1fd9c5815b2168f7577a779d1d9ad69b3d806 | 4a3fcb3e93ba88ee09d34b190450ad18a3125d67 | /users/api/views.py | f670ba2e11251fea201869c3c1d44238463ef4c9 | [] | no_license | hllustosa/online-judge | 8c14f3348d7eba56126824f1aca6d9ee907e688d | 4340eefc760ee3122e805214af0aa5f1a4f4fd96 | refs/heads/master | 2023-06-20T22:27:17.359455 | 2021-08-09T03:27:55 | 2021-08-09T03:27:55 | 392,495,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | from api.models import Profile
from .utils import IsAuthenticatedWith, method_permission_classes, ANY
from django.core.paginator import Paginator
from django.http.response import JsonResponse
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework.views import APIView
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from .serializers import ClaimsObtainPairSerializer, UserResponseSerializer
# Create your views here.
class ClaimsTokenObtainPairView(TokenObtainPairView):
    """JWT login endpoint that swaps in ClaimsObtainPairSerializer, which
    (per its name) embeds extra user claims in the issued token pair —
    confirm against the serializer module."""

    serializer_class = ClaimsObtainPairSerializer
class UsersListView(APIView):
    """Paginated user listing with an optional substring filter.

    Query params: ``name`` (optional filter), ``page`` (default 1),
    ``pageSize`` (default 10). Returns ``{"items": [...], "count": N}``
    where ``count`` is the total number of matches, not the page size.
    """

    @method_permission_classes((IsAuthenticatedWith(ANY),))
    def get(self, request):
        users = User.objects.all().order_by('id')
        name = request.query_params.get('name', None)
        page = request.query_params.get('page', 1)
        pageSize = request.query_params.get('pageSize', 10)
        if name is not None:
            # Bug fix: django.contrib.auth.models.User has no "name" field,
            # so name__icontains raised FieldError on every filtered request.
            # Filter on username; switch to first_name/last_name if the UI
            # sends a person's name rather than the login.
            users = users.filter(username__icontains=name)
        # Count after filtering so the client can size its pager.
        count = users.count()
        paginator = Paginator(users, pageSize)
        users_serializers = UserResponseSerializer(
            paginator.page(page), many=True)
        return JsonResponse({'items': users_serializers.data, 'count': count}, safe=False)
class UsersListDetailsView(APIView):
    """Detail endpoint exposing basic identity fields plus the profile type
    ("Teacher", "Student", or "" when the user has no Profile row)."""

    @method_permission_classes((IsAuthenticatedWith(ANY),))
    def get(self, request, pk):
        # Robustness fix: User.objects.get() raised DoesNotExist (HTTP 500)
        # for unknown pks; answer with an explicit 404 instead.
        user = User.objects.filter(pk=pk).first()
        if user is None:
            return JsonResponse({'detail': 'User not found'}, status=404)
        profile = Profile.objects.filter(user=user).first()
        # Renamed local ("type" shadowed the builtin); response key unchanged.
        if profile is None:
            profile_type = ""
        else:
            profile_type = 'Teacher' if profile.type == profile.TEACHER else 'Student'
        return JsonResponse({'id': user.id, 'first_name': user.first_name, 'last_name': user.last_name, 'email': user.email, 'type': profile_type}, safe=False)
| [
"hllustosa@gmail.com"
] | hllustosa@gmail.com |
e9cce5f73136be901491178d96b79d30a3cb9135 | 61b4126af2563e5be0988fd7cba7b62929d3c8b8 | /20171114_assignment3/20171114_part2/Refactored/tests/test_color.py | e7905fc2a6985b9c924847d3efcd319136b8bd58 | [] | no_license | batra98/Mario_Testing | 6ac285a836123c161226a79fb7bff8f68279eb3f | e7e6df41ec432d8302b2cd2c286565218e4ab1ba | refs/heads/master | 2020-04-15T16:28:36.287346 | 2019-01-09T10:10:40 | 2019-01-09T10:10:40 | 164,838,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | import os
import sys
import pytest
from importlib import reload
# Make the local `color` module importable no matter which directory the
# test runner starts in: from tests/ go one level up; from the repo root
# descend into Refactored/; from Refactored/ itself no change is needed.
if 'tests' in os.getcwd():
    sys.path.insert(0, os.path.join(os.getcwd(), '../'))
elif 'Refactored' not in os.getcwd():
    sys.path.insert(0, os.path.join(os.getcwd(), './Refactored/'))
import color
class Test_color:
    """Tests for the `color` module's ANSI palette and character mapping."""
    def test_color(self):
        """Every named color used by the game must exist in color.colors."""
        assert 'Light Red' in color.colors
        assert 'Brown' in color.colors
        assert 'Blue' in color.colors
        assert 'Light Blue' in color.colors
        assert 'Purple' in color.colors
        assert 'Yellow' in color.colors
        assert 'Red' in color.colors
        assert 'White' in color.colors
    def test_char(self):
        """getcolor(ch) must wrap ch in its ANSI color code plus the
        reset sequence '\\x1b[0m'."""
        assert color.getcolor("m") == color.colors['Light Red']+'m'+'\x1b[0m'
        assert color.getcolor("#") == color.colors['Brown']+'#'+'\x1b[0m'
        assert color.getcolor("-") == color.colors['Blue']+'-'+'\x1b[0m'
        assert color.getcolor(")") == color.colors['Light Blue']+')'+'\x1b[0m'
        assert color.getcolor("(") == color.colors['Light Blue']+'('+'\x1b[0m'
        assert color.getcolor("$") == color.colors['Purple']+'$'+'\x1b[0m'
        assert color.getcolor("e") == color.colors['Yellow']+'e'+'\x1b[0m'
        assert color.getcolor("&") == color.colors['Red']+'&'+'\x1b[0m'
        assert color.getcolor("M") == color.colors['White']+'M'+'\x1b[0m'
        assert color.getcolor("S") == color.colors['Yellow']+'S'+'\x1b[0m'
        assert color.getcolor(".") == color.colors['Purple']+'.'+'\x1b[0m'
        assert color.getcolor("*") == color.colors['White']+'*'+'\x1b[0m'
reload(sys) | [
"batragaurav2616@gmail.com"
] | batragaurav2616@gmail.com |
30a361a083b45414901e3b65a190c5e58053705b | 45669b92b05526f620359cb16e99129a92cb9787 | /server-app/NeighborhoodWatch/settings.py | 7136b479ded2fa413951c85c3fc52bc7826cb99e | [] | no_license | sero-dev/neighborhood-watch | 649822ee231ee40501af7d6ce0a1fdc5712a56c2 | c0291100f120c1255ecfd576aacc92206dfeae25 | refs/heads/master | 2022-12-18T14:54:34.551038 | 2020-09-27T14:00:04 | 2020-09-27T14:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | """
Django settings for NeighborhoodWatch project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any non-local deployment.
SECRET_KEY = '2v^bncmv*k@rne63!0tyqx&c6usxddg!)gcb4)+iukk8vww5$0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=True Django still serves localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Incidents.apps.IncidentsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'NeighborhoodWatch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'NeighborhoodWatch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): hardcoded default credentials are fine for a local dev
# Postgres but must move to environment variables before deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'postgres',
        'PASSWORD': 'postgres',
        'USER': 'postgres',
        'HOST': 'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"sean.rodriguez@outlook.com"
] | sean.rodriguez@outlook.com |
4ef86b545a9342af06c85b513dc096503beae90a | 47b1833510a24fd3dff6598899410b8e448c38d1 | /dividir.py | da2680c934baf094bfb2de863fccdbee48d24f9f | [] | no_license | francinald0/diversas | cd3749015df6bca97e918b2a99086a3c1a2abd14 | 060d12ceeac42fbf72d8478f18a8122a057ae95c | refs/heads/main | 2023-08-12T04:35:04.709723 | 2021-10-03T02:07:50 | 2021-10-03T02:07:50 | 310,326,466 | 0 | 0 | null | 2021-10-03T02:45:57 | 2020-11-05T14:36:34 | Python | UTF-8 | Python | false | false | 11,077 | py | #biblioteca para leitura do arquivo .pdf
from PyPDF2 import PdfFileReader
#biblioteca para modificação do arquivo .pdf
from PyPDF2 import PdfFileWriter
from pathlib import Path
import os
#---lista de funções---
#def nameFirstHalf(file):
#def nameSecondHalf(file):
#def cutFile(inputFile):
#def dividirArquivos(myPath):
def nameFirstHalf(file):
    """Return the output filename for the first half of ``file``.

    e.g. "relatorio.pdf" -> "relatorio_1.pdf".

    Fix: uses os.path.splitext instead of the original ``file[:-4]``,
    so names without a ".pdf" extension are no longer silently truncated.
    """
    base, _ = os.path.splitext(file)
    return base + "_1.pdf"
def nameSecondHalf(file):
    """Return the output filename for the second half of ``file``.

    e.g. "relatorio.pdf" -> "relatorio_2.pdf".

    Fix: uses os.path.splitext instead of the original ``file[:-4]``,
    so names without a ".pdf" extension are no longer silently truncated.
    """
    base, _ = os.path.splitext(file)
    return base + "_2.pdf"
def cutFile(inputFile):
    """Split the PDF ``inputFile`` into two files: the first half of the
    pages (suffix _1) and the second half (suffix _2).

    For an odd page count the extra page goes into the first half.
    The original file is NOT removed here; the caller decides that.

    Fix: the even/odd branches collapsed to a single ``(n + 1) // 2``
    (identical for even n), using floor division so the slice bound stays
    an int on Python 3 instead of relying on a later ``int()`` cast.
    """
    print("recebido pela rotina cutFile como: \n")
    print(inputFile)
    input_pdf = PdfFileReader(inputFile)
    pdf_writer1 = PdfFileWriter()
    pdf_writer2 = PdfFileWriter()
    numPages = input_pdf.getNumPages()
    # (n + 1) // 2 pages in the first half: n/2 for even n, (n+1)/2 for odd.
    parte1 = (numPages + 1) // 2
    for page in input_pdf.pages[:parte1]:
        pdf_writer1.addPage(page)
    arquivo_saida1 = nameFirstHalf(inputFile)
    print("nome do arquivo da primeira metade: \n")
    print(arquivo_saida1)
    with Path(arquivo_saida1).open(mode="wb") as output_file:
        pdf_writer1.write(output_file)
    for page in input_pdf.pages[parte1:]:
        pdf_writer2.addPage(page)
    arquivo_saida2 = nameSecondHalf(inputFile)
    print("nome do arquivo da segunda metade: \n")
    print(arquivo_saida2)
    with Path(arquivo_saida2).open(mode="wb") as output_file:
        pdf_writer2.write(output_file)
#TAM = 10485760 #10MB
#TAM = 2097152 #2MB
def dividirArquivos(myPath, tam_limite=10485760):
    """Recursively split every PDF under ``myPath`` larger than
    ``tam_limite`` bytes (default 10 MB) in half via cutFile(), skipping
    folders whose name ends in "bkp". Halves that still exceed the limit
    are re-queued until every piece is below it; each split original is
    removed afterwards.

    Fixes relative to the previous version:
    - ``if ... <> "bkp"`` was a SyntaxError ("<>" no longer exists and the
      colon was missing);
    - the size limit referenced a global TAM that was commented out
      (NameError); it is now the backward-compatible optional parameter
      ``tam_limite``, so old calls ``dividirArquivos(caminho)`` still work;
    - the work list was mutated (append/remove) while being iterated,
      which skips elements; it is now an explicit queue;
    - ``folderName + "\\\\" + filename`` was Windows-only; os.path.join is
      portable and identical on Windows;
    - the interactive "encerrar? (s/n)" prompt compared against the
      undefined name ``s`` and recursed needlessly; the queue makes both
      unnecessary.
    """
    print("valor recebido pela rotina:" + myPath)
    print("início da rotina dividirArquivos...\n")
    fila = []
    # Phase 1: collect every oversized PDF, ignoring backup folders.
    for folderName, subfolders, filenames in os.walk(myPath):
        if folderName[-3:] == "bkp":
            continue
        for filename in filenames:
            pathArquivo = os.path.join(folderName, filename)
            if os.path.getsize(pathArquivo) > tam_limite:
                fila.append(pathArquivo)
                print("arquivo adicionado à lista: " + pathArquivo)
    # Phase 2: drain the queue, re-queueing halves that are still too big.
    while fila:
        file = fila.pop(0)
        if not os.path.exists(file):
            continue
        cutFile(file)
        primeiraMetade = nameFirstHalf(file)
        segundaMetade = nameSecondHalf(file)
        if os.path.getsize(primeiraMetade) > tam_limite:
            fila.append(primeiraMetade)
        if os.path.getsize(segundaMetade) > tam_limite:
            fila.append(segundaMetade)
        os.remove(file)
    print("todos os arquivos estão abaixo do limite")
def main():
op = 0
while (op != 7):
print("1 - nameFirstHalf(file)")
print("2 - nameSecondHalf(file)")
print("3 - cutFile(inputFile)")
print("4 - dividirArquivos(myPath)")
print("5 - ...")
print("6 - ...")
print("7 - Sair")
op = int(input("Escolha a opção: "))
if op == 1:
arquivo = input("arquivo(caminho completo): ")
print(nameFirstHalf(arquivo))
elif op == 2:
arquivo = input("arquivo(caminho completo): ")
print(nameSecondHalf(arquivo))
elif op == 3:
arquivo = input("arquivo(caminho completo): ")
cutFile(arquivo)
elif op == 4:
caminho = input("indique o caminho da pasta raiz: ")
dividirArquivos(caminho)
elif op == 5:
pass
elif op == 6:
pass
elif op == 7:
exit
else:
print("opção inválida\n" + "op = " + str(op))
if __name__ == "__main__":
main()
'''
def dividirAoMeio(inputFile):
#verifica se a barra do caminho fornecido em
#inputFile está para a direita. Caso estaja para a
#esquerda, substitui por barra para a direita
if '\\' in inputFile:
inputFile = inputFile.replace('\\','/')
#print(inputFile)
#extrair nome do arquivo
if '/' in inputFile:
encontrou = False
ultimo = len(inputFile)-1
while not encontrou:
if inputFile[ultimo]== '/':
nomeArquivo = inputFile[ultimo+1:]
nomePasta = inputFile[:ultimo+1]
encontrou = True
else:
ultimo = ultimo - 1
print("nome do arquivo: " + nomeArquivo)
print("nome da pasta: " + nomePasta)
destino1 = nomePasta+nomeArquivo[:-4]+"_1.pdf"
destino2 = nomePasta+nomeArquivo[:-4]+"_2.pdf"
print(destino1)
print(destino2)
input_pdf = PdfFileReader(inputFile)
pdf_writer = PdfFileWriter()
pdf_writer2 = PdfFileWriter()
numPages = input_pdf.getNumPages()
if ((numPages % 2)==0):
parte1 = numPages/2
else:
parte1 =(numPages+1)/2
indice=0
for page in input_pdf.pages[:int(parte1)]:
pdf_writer.addPage(page)
print("page "+ str(indice+1)+" done")
indice = indice + 1
with Path(destino1).open(mode="wb") as output_file:
pdf_writer.write(output_file)
print("done")
indice=0
for page in input_pdf.pages[int(parte1):]:
pdf_writer2.addPage(page)
print("page "+ str(indice+1)+" done")
indice = indice + 1
with Path(destino2).open(mode="wb") as output_file:
pdf_writer2.write(output_file)
print("done")
#cria pasta 'old' para armazenar o arquivo que foi dividido
#para fins de backup, copiando-o em seguida
#verificar primeiro, se o nome do arquivo possui espaços em branco
#se positivo, ele deve ser colocado entre aspas. Deve-se ainda inverter
#as barras, pois o comando "move" exige que as barras estejam invertidas.
if " " in nomeArquivo:
nomeArquivo = "\"" + nomeArquivo +"\""
if os.path.isdir(nomePasta+"old"):
print("old folder already exists in "+ nomePasta+". Moving divided file...")
comandoBarrasInvertidas = 'move '+nomePasta+nomeArquivo+' '+nomePasta+"old/"+nomeArquivo
comandoBarrasInvertidas = comandoBarrasInvertidas.replace('/','\\')
os.system(comandoBarrasInvertidas)
print(comandoBarrasInvertidas)
print("done")
else:
print("old folder don\'t exists in " +nomePasta+". Creating...")
os.mkdir(nomePasta+"old")
print("done")
print("Now, moving divided file...")
comandoBarrasInvertidas = 'move '+nomePasta+nomeArquivo+' '+nomePasta+"old/"+nomeArquivo
comandoBarrasInvertidas = comandoBarrasInvertidas.replace('/','\\')
os.system(comandoBarrasInvertidas)
print("done")
#caminho = input("arquivo, com caminho completo: ")
#dividirAoMeio(caminho)
#dividir arquivo ao meio
pdf_path = "D:/python/pasta_teste/2/Apostila Completa - Curso Renato Saraiva OAB (1).pdf"
input_pdf = PdfFileReader(str(pdf_path))
pdf_writer = PdfFileWriter()
pdf_writer2 = PdfFileWriter()
numPages = input_pdf.getNumPages()
#print(numPages)
if ((numPages % 2)==0):
parte1 = numPages/2
else:
parte1 =(numPages+1)/2
indice=0
for page in input_pdf.pages[:int(parte1)]:
pdf_writer.addPage(page)
print("page "+ str(indice+1)+" done")
indice = indice + 1
with Path("D:/python/pasta_teste/parte1.pdf").open(mode="wb") as output_file:
pdf_writer.write(output_file)
indice=0
for page in input_pdf.pages[int(parte1):]:
#for page in input_pdf.pages[5:9]:
pdf_writer2.addPage(page)
print("page "+ str(indice+1)+" done")
indice = indice + 1
with Path("D:/python/pasta_teste/parte2.pdf").open(mode="wb") as output_file:
pdf_writer2.write(output_file)
pdf_path = "caminho do arquivo"
#criação do objeto pdf, da classe PdfFileReader
#que faz a leitura do arquivo .pdf
pdf = PdfFileReader(str(pdf_path))
first_page = pdf.getPage(0) # atribui ao objeto a primeira página do arquivo lido
#print(pdf.getNumPages()) --> retorna o número de páginas
#print(pdf.documentInfo.title) --> retorna o título do documento
#criação de um objeto que representa um pdf em branco, para receber
#as páginas e/ou alterações que se fizer em outro arquivo .pdf,
#no caso, do objeto pdf, da classe PdfReader
pdf_writer = PdfFileWriter()
pdf_writer.addPage(first_page) #adiciona a primeira página do arquivo lido ao arquivo em branco
with Path("first_page.pdf").open(mode="wb") as output_file:
pdf_writer.write(output_file) # salva o arquivo no disco
#para adicionar 4 páginas do aquivo lido a um arquivo (pdf_out)
pdf_out = PdfFileWriter()
for n in range(1,4):
page = pdf.getPage(n)
pdf_out.addPage(page)
with Path("arquivo_final").open(mode="wb") as output_file:
pdf_writer.write(output_file)
#para adicionar um segmento de páginas
input_pdf = PdfFileReader(str(pdf_path))
pdf_writer = PdfFileWriter()
for page in input_pdf.pages[1:4]:
pdf_writer.addPage(page)
with Path("primeira parte.pdf").open(mode="wb") as output_file:
pdf_writer.write(output_file)
#page = pdf_writer.addBlankPage(width=72, height=72)
#print(type(page))
#with Path("blank.pdf").open(mode="wb") as output_file:
# pdf_writer.write(output_file)
| [
"noreply@github.com"
] | noreply@github.com |
d683917dd0c10a923d5b867d6ffe64100139f261 | b4e1da07f3679ee49475728b38ea3450a9b79ee8 | /tests/test_lc_gen.py | f32493420b3f6ec8b5ad9bd322427d0c8b008ee2 | [] | no_license | flyingleafe/link-cut-tree | 463ac91218aa89c0cb0b85ea06813415f9f31e81 | 50d912108176304e33a49defd589127ea33b6220 | refs/heads/master | 2021-05-11T13:44:05.857261 | 2018-11-01T19:47:03 | 2018-11-01T19:47:03 | 117,685,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | import sys
import os
import numpy as np
from random import randint
from random import sample
INF = 999999
def write_line(f, x):
s = str(x) + '\n'
f.write(s)
fname = sys.argv[1]
n = int(sys.argv[2])
m = int(sys.argv[3])
inp = open(fname + '.in', 'w')
out = open(fname + '.out', 'w')
write_line(inp, n)
vals = np.random.randint(0, INF, size=n, dtype=int)
for i in range(0, n):
write_line(inp, vals[i])
# tree node is a value + link to parent
forest = list(map(lambda v: [v, None], vals))
# maintain the set of forest roots
roots = set(range(0, n))
def ancestors(i):
while i is not None:
i = forest[i][1]
yield i
def mk_get(k):
real_v = forest[k][0]
write_line(inp, "get %d" % k)
write_line(out, real_v)
def mk_add(k, c):
i = k
while i is not None:
forest[i][0] += c
i = forest[i][1]
write_line(inp, "add %d %d" % (k, c))
write_line(out, "added")
def mk_min(k):
i = k
res = None
while i is not None:
res = forest[i][0] if res is None else min((forest[i][0], res))
i = forest[i][1]
if res is None:
raise Exception("Wrong node idx passed")
write_line(inp, "min %d" % k)
write_line(out, res)
def mk_link(i, j):
if forest[j][1] is not None:
raise Exception("Trying to link non-root")
forest[j][1] = i
roots.remove(j)
write_line(inp, "link %d %d" % (i, j))
write_line(out, "linked")
def mk_cut(k):
forest[k][1] = None
roots.add(k)
write_line(inp, "cut %d" % k)
write_line(out, "cut")
def mk_lca(i, j):
i_ancs = set(ancestors(i))
_j = j
lca = None
while _j is not None:
if _j in i_ancs:
lca = _j
break
_j = forest[_j][1]
write_line(inp, "lca %d %d" % (i, j))
write_line(out, lca)
write_line(inp, m)
for i in range(0, m):
k = randint(0, n-1)
act = randint(0, 5)
if act == 0:
mk_get(k)
elif act == 1:
c = randint(-10, 10)
mk_add(k, c)
elif act == 2:
mk_min(k)
elif act == 3:
[v] = sample(roots, 1)
mk_link(k, v)
elif act == 4:
mk_cut(k)
elif act == 5:
v = randint(0, n-1)
mk_lca(k, v)
| [
"flyingleafe@gmail.com"
] | flyingleafe@gmail.com |
0637515c84350843070ea85d41e0c80b1ecac7f2 | bbbd62561fe3fddbd3c77b3f457355aaa43e7851 | /StudentEngines/jak3122/myStack.py | 5b1fa6ae494cb4cca67baf2f4f114b00d1acba4a | [] | no_license | jak3122/Quoridor | 18a459fa18ccc663250ccd177cabf7a7123252e5 | dafb54987a7ccd2759ce4560d2529b5f7371b4fa | refs/heads/master | 2016-09-05T14:08:49.498839 | 2013-01-23T02:17:49 | 2013-01-23T02:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | """
file: stack.py
language: python2/3
author: Sean Strout
description: A linked node implementation of a stack
"""
from .myNode import *
class Stack:
__slots__ = ( "top" )
def __init__(self):
self.top = EmptyListNode() # the top node in the stack
def push(element, stack):
"""Add an element to the top of the stack"""
newnode = ListNode(element, stack.top)
stack.top = newnode
def top(stack):
"""Access the top element oi the stack without removing it"""
if emptyStack(stack):
raise IndexError("top on empty stack")
return stack.top.data
def pop(stack):
"""Remove the top element in the stack (returns None)"""
if emptyStack(stack):
raise IndexError("pop on empty stack")
stack.top = stack.top.next
def emptyStack(stack):
"""Is the stack empty?"""
return isinstance(stack.top, EmptyListNode) | [
"hyzer.jak@gmail.com"
] | hyzer.jak@gmail.com |
87e8cf9804f7975bc2d99c8572cd7d15d160c360 | 21267aa3c87a7fea80fb79b129d2da1628f6bc81 | /cnn_dcn.py | 4dfe4d93c688b4a54c04c33d6ea385a42cb1bbb9 | [] | no_license | happyxuwork/- | 47fff3c8319ede3ee7b4f7021e0d3219896e3551 | f3c9a74a740c0874ecbb38a291b2249e82322366 | refs/heads/master | 2020-06-02T20:45:34.286428 | 2019-05-07T14:44:55 | 2019-05-07T14:44:55 | 86,070,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | from __future__ import absolute_import, division
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch_deform_conv.layers import ConvOffset2D
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
# conv11
self.conv11 = nn.Conv2d(1, 32, 3, padding=1)
self.bn11 = nn.BatchNorm2d(32)
# conv12
self.conv12 = nn.Conv2d(32, 64, 3, padding=1, stride=2)
self.bn12 = nn.BatchNorm2d(64)
# conv21
self.conv21 = nn.Conv2d(64, 128, 3, padding= 1)
self.bn21 = nn.BatchNorm2d(128)
# conv22
self.conv22 = nn.Conv2d(128, 128, 3, padding=1, stride=2)
self.bn22 = nn.BatchNorm2d(128)
# out
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv11(x))
x = self.bn11(x)
x = F.relu(self.conv12(x))
x = self.bn12(x)
x = F.relu(self.conv21(x))
x = self.bn21(x)
x = F.relu(self.conv22(x))
x = self.bn22(x)
x = F.avg_pool2d(x, kernel_size=[x.size(2), x.size(3)])
x = self.fc(x.view(x.size()[:2]))#
x = F.softmax(x)
return x
class DeformConvNet(nn.Module):
def __init__(self):
super(DeformConvNet, self).__init__()
# conv11
self.conv11 = nn.Conv2d(1, 32, 3, padding=1)
self.bn11 = nn.BatchNorm2d(32)
# conv12
self.offset12 = ConvOffset2D(32)
self.conv12 = nn.Conv2d(32, 64, 3, padding=1, stride=2)
self.bn12 = nn.BatchNorm2d(64)
# conv21
self.offset21 = ConvOffset2D(64)
self.conv21 = nn.Conv2d(64, 128, 3, padding= 1)
self.bn21 = nn.BatchNorm2d(128)
# conv22
self.offset22 = ConvOffset2D(128)
self.conv22 = nn.Conv2d(128, 128, 3, padding=1, stride=2)
self.bn22 = nn.BatchNorm2d(128)
# out
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = F.relu(self.conv11(x))
x = self.bn11(x)
x = self.offset12(x)
x = F.relu(self.conv12(x))
x = self.bn12(x)
x = self.offset21(x)
x = F.relu(self.conv21(x))
x = self.bn21(x)
x = self.offset22(x)
x = F.relu(self.conv22(x))
x = self.bn22(x)
x = F.avg_pool2d(x, kernel_size=[x.size(2), x.size(3)])
x = self.fc(x.view(x.size()[:2]))
x = F.softmax(x)
return x
def freeze(self, module_classes):
'''
freeze modules for finetuning
'''
for k, m in self._modules.items():
if any([type(m) == mc for mc in module_classes]):
for param in m.parameters():
param.requires_grad = False
def unfreeze(self, module_classes):
'''
unfreeze modules
'''
for k, m in self._modules.items():
if any([isinstance(m, mc) for mc in module_classes]):
for param in m.parameters():
param.requires_grad = True
def parameters(self):
return filter(lambda p: p.requires_grad, super(DeformConvNet, self).parameters())
def get_cnn():
return ConvNet()
def get_deform_cnn(trainable=True, freeze_filter=[nn.Conv2d, nn.Linear]):
model = DeformConvNet()
if not trainable:
model.freeze(freeze_filter)
return model | [
"noreply@github.com"
] | noreply@github.com |
a155b79c1a33bd7650a4e79b9ce0bd0d7bebb71d | d7e03c4b499d176dc8a524e14b4806ded69c100c | /faotools/FAOTools.py | 7d5aa845139242929b48df0a74e4cfd3ca2bf85f | [] | no_license | Pacopag/faolyzer | 036f76ff6e7d4580d38d16796c60ff4a0b1a54c5 | 7f708e2d1a7c75b96e950eea5f4e6283117f87e3 | refs/heads/master | 2021-01-23T07:03:50.577482 | 2013-08-15T01:56:34 | 2013-08-15T01:56:34 | 12,098,096 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115,498 | py | from __future__ import division #ensures division as double
from pymongo import Connection
def get_land_used_for_production(year,country_code,item_code,quantity,flag=[]):
"""
Given a year, country code, and trade item code, and quantity of that item, return the land area
used to produce that quantity in the given country.
"""
if item_code in livestock_codes:
carcass_weight = get_carcass_weight(year,country_code,item_code)
quantity *= carcass_weight
meat_code = livestock_reverse_mappings[item_code]
return get_land_used_for_production(year,country_code,meat_code,quantity)
source_codes, multipliers, flags = get_source_tree(item_code)
if 0 in flags or 2 in flags:
source_yields = {}
source_ssrs = {}
source_weights = {}
for code in source_codes:
source_yields[code],f=get_yield(year,country_code,code)
#print "Yield",code,source_yields[code]
source_ssrs[code],f=get_ssr(year,country_code,code,incl_exports=False)
#print "SSR",code,source_ssrs[code]
source_production,f=get_production(year,country_code,code)
#print "Production",code,source_production
if isinstance(source_yields[code],dict):
source_production = source_production['T']
source_yields[code] = source_yields[code]['T']
source_imports,source_exports = get_import_export(year,country_code,code)
source_weights[code] = source_production+float(source_imports)-float(source_exports)
sum_weights = sum(source_weights.values())
#print "Sum weights",sum_weights
if sum_weights==0:
sum_weights = float("inf")
source_weights = {code:(weight/sum_weights) for code,weight in source_weights.iteritems()}
source_displacements = {}
displacement = 0.0
for code,multiplier in zip(source_codes,multipliers):
source_displacements[code] = quantity*float(source_ssrs[code])*float(source_weights[code])/float(source_yields[code])/multiplier if source_yields[code]>0 else 0
displacement += source_displacements[code]
return displacement,source_displacements
else:
return 0.0,{}
def get_offtake_rate(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
"""
Given a year, country_code, and primary livestock item code, return the off-take rate for the associated live animals.
For non-ruminants, the value defaults to 1.
"""
if item_code not in bovine_meat_codes+ovine_meat_codes:
return 1.0,"NR"
if org_year is None:
org_year = year
#get reported number slaughered
spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementgroup':producing_animals_group}
fields = {'value':1,'flag':1}
rec,f = find_one(table_productionlivestockprimary,spec,fields)
num_slaughtered = rec['value'] if rec is not None else 0.0
no_data = num_slaughtered==0
if no_harvest and next_dir>-1 and year<max_year: #get next
next_dir = 1
#flag.append("Ny")
return get_offtake_rate(year+1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif num_slaughtered and year==max_year and org_year!=min_year:
next_dir = -1
#flag = list(set(flag)-set(['Ny']))#flag.translate(None,'Ny')+"Py"
#flag.append('Py')
return get_offtake_rate(org_year-1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif num_slaughtered and next_dir < 0 and year>min_year:
next_dir = -1
#flag.append('Py')
return get_offtake_rate(year-1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif num_slaughtered and country_code!=world_code:
#flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
aggregate_level+=1
region_code = get_country_region(country_code,aggregate_level)
#flag.extend(['A',str(region_code)])#'A'+str(region_code)
return get_offtake_rate(org_year,region_code,item_code,flag,org_year,next_dir,aggregate_level)
elif no_harvest:
return 0.0,"No data"
#get number of meat animals
num_meat_animals = get_num_animals(year,country_code,item_code,from_db=True)[0]['T']
#get number of milk animals
milk_code = meat_milkeggs_mappings[item_code]
num_milk_animals = get_num_animals(year,country_code,milk_code,from_db=True)[0]['T']
#get number of culled milk animals
cull_rate = get_cull_rate(year,country_code,milk_code,from_db=True)[0]
num_culled = num_milk_animals*cull_rate
def get_num_animals(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
"""
Given a year, country_code, and primary livestock item_code or livestock item_code, return the number of animals in that system.
For live animal item, this is just the stocks reported in FAOSTAT with all units converted to "head".
For milk/egg items, this is the number of producing/laying animals.
For meat items, this is the larger of number slaughtered and (stocks - milk/eggs animals)
"""
if from_db:
(num_animals,flag) = ({'T':0.0,'ML':0.0,'P':0.0},'No data')#(rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
spec = {'year':year,'countrycode':country_code,'itemcode':item_code}
fields = {'elementcode':1,'value':1,'flag':1}
qry,f = find(table_liveanimalproduction,spec,fields)
for rec in qry:
if rec['elementcode']==-2100:
num_anmials['T'] = rec['value']
elif rec['elementcode']==-2101:
num_animals['ML'] = rec['value']
elif rec['elementcode']==-2102:
num_animals['P'] = rec['value']
else:
print "Invalid elementcode in get_num_animals"
raise ValueError;
return num_animals,flag
if org_year is None:
org_year = year
is_primary = item_code in milkeggsmeat_animal_mappings
# Get stocks of corresponding animal
animal_code = item_code
primary_code = item_code
if is_primary:
animal_code = milkeggsmeat_animal_mappings[item_code]
else:
primary_code = livestock_reverse_mappings[item_code]
num_stocks = 0
spec = {'year':year,'countrycode':country_code,'itemcode':animal_code}
fields = {'elementcode':1,'value':1}
rec,f = find_one(table_productionlivestock,spec,fields)
if rec is not None:
mult = 1000.0 if rec['elementcode']==5112 else 1.0 #convert 1000 head to head
num_stocks = mult*rec['value']
# For meat,milk and egg codes, get number producing/slaughtered.
num_producing = 0
if is_primary:
spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementgroup':producing_animals_group}
fields = {'elementcode':1,'value':1}
rec,f = find_one(table_productionlivestockprimary,spec,fields)
if rec is not None:
mult = 1000.0 if rec['elementcode'] in khead_codes else 1.0
num_producing = mult*rec['value']
no_data = num_stocks+num_producing==0.0
if no_data and next_dir>-1 and year<max_year-1: # the -1 is a band-aid since 2010 land data is not available yet
next_dir = 1
#flag = list(set(flag)-set(['Fr']))#flag.translate(None,'Fr') + "Ny"
#flag.append('Ny')
return get_num_animals(year+1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif no_data and year==max_year-1 and org_year!=min_year: # the -1 is a band-aid since 2010 land data is not available yet
next_dir = -1
#flag = list(set(flag)-set(['Fr'])-set(['Ny']))#flag.translate(None,'Fr') + "Ny"
#flag.append('Py')
return get_num_animals(org_year-1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif no_data and next_dir < 0 and year>min_year:
next_dir = -1
#flag = list(set(flag)-set(['Fr']))#flag.translate(None,'Fr') + "Ny"
#flag.append('Py')
return get_num_animals(year-1,country_code,item_code,flag,org_year,next_dir,aggregate_level)
elif no_data:
return {'T':0.0,'ML':0.0,'P':0.0}, "No data"
yr = year-1970 #Bouwman et al. (2005) data starts at 1970, but the quadratic params a,b,c are fitted to the shifted data where 1970 -> 0
region_code = get_country_region(country_code)
spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':primary_code}
fields = {'a':1,'b':1,'c':1}
qry2,f2 = find(table_systemanimalfractions,spec,fields,sort=[('aggregatecode',-1)])
rec2 = qry2.next()
MLfrac = rec2['a']*yr*yr + rec2['b']*yr + rec2['c'] #fraction of animals from mixed+landless systems
# Map itemcode to liveanimal code if meat product
num = 0
if item_code in livestock_codes:
num = num_stocks
elif item_code in milk_codes+egg_codes:
num = num_producing
elif item_code in meat_milkeggs_mappings:
corresp_code = meat_milkeggs_mappings[item_code]
num_corresp,f = get_num_animals(year,country_code,corresp_code)
num_corresp = num_corresp['T']
num = num_stocks - num_corresp
if num_producing > num:
num = num_producing
else:
num = num_producing if num_producing > num_stocks else num_stocks
num_animals = num
num_animals_ML = MLfrac*num
num_animals_P = num_animals - num_animals_ML
flag = ''
ret = {'T':num_animals,'ML':num_animals_ML,'P':num_animals_P}
return ret,flag
def get_stocking_rate(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
"""
Given a year, country_code, and primary ruminant livestock item_code, return the number of animals per hectare of pasture
"""
if from_db:
spec = {'year':year,'countrycode':country_code,'itemcode':item_code}
fields = {'value':1,'flag':1}
rec = find_one(table_stockingrates,spec,fields)
(stocking_rate,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
return stocking_rate,flag
pa,f = get_pasture_areas(year,country_code,item_code)
pasture_area = pa['T']
pasture_area_ML = pa['ML']
pasture_area_P = pa['P']
na,f = get_num_animals(year,country_code,item_code)#,from_db=True)
num_animals = na['T']
num_animals_ML = na['ML']
num_animals_P = na['P']
#print pasture_area_P, num_animals_P
stocking_rate = num_animals/pasture_area if pasture_area!=0 else 0.0
stocking_rate_ML = num_animals_ML/pasture_area_ML if pasture_area_ML!=0 else 0.0
stocking_rate_P = num_animals_P/pasture_area_P if pasture_area_P!=0 else 0.0
flag = ''
ret = {'T':stocking_rate,'ML':stocking_rate_ML,'P':stocking_rate_P}
return ret,flag
def get_weighted_yield(year,country_code,item_codes,sector='total',sys_code=-5511,imports=True,exports=True,cull=False,flag=[],org_year=None,next_dir=0,aggregate_level=0,get_next=False):
"""
Get the average yield of primary commodities specified by item_codes.
"""
production = 0.0
area_harvested = 0.0
for item_code in item_codes:
p,p_flag = get_production(year,country_code,item_code,sys_code,imports,exports,cull,from_db=True)
a,a_flag = get_area_harvested(year,country_code,item_code,sector,get_next=get_next,from_db=True)
if isinstance(p,dict): #livestock products return dictionaries...get only total "T" component
p = p['T']
a = a['T']
production += p
area_harvested += a
wyield = production/area_harvested if area_harvested!=0 else 0.0
flag = ''
return wyield,flag
def get_livestock_stats(year,country_code,item_code,flag='',org_year=None,next_dir=0,aggregate_level=0,from_db=False):
"""
Given a year country_code and milk or egg item code, get the fraction of milk/laying animals that
were likely culled for meat during the given year.
"""
if from_db:
stats = {
'stocks':0,
'meat_animals':0,
'meat_animals_ML':0,
'meat_animals_P':0,
'producing_animals_T':0,
'producing_animals_ML':0,
'producing_animals_P':0,
'births':0,
'meat_births':0,
'dairyegg_births':0,
'old_maids':0,
'slaughtered':0,
'offtake_rate':0,
'offtake_rate_ML':0,
'offtake_rate_P':0,
'carcass_weight':0,
'carcass_weight_ML':0,
'carcass_weight_P':0,
'cull':0,
'cull_rate':0
}
flag = "No data"
spec = {'year':year,'countrycode':country_code,'itemcode':item_code}
fields = {'value':1,'flag':1}
rec = find_one(table_livestockstats,spec,fields)
return stats,flag
if org_year is None:
org_year = year
if year == min_year:
year += 1
elif year == max_year:
year -= 1
is_milkegg_animal = True
milkegg_code = None
try:
if item_code in livestock_codes:
animal_code = item_code
meat_code = animal_meat_mappings[item_code]
milkegg_code = animal_milkeggs_mappings[item_code]
elif item_code in milk_codes+egg_codes:
milkegg_code = item_code
meat_code = milkeggs_meat_mappings[item_code]
animal_code = milkeggs_animal_mappings[item_code]
elif item_code in meat_codes:
meat_code = item_code
animal_code = meat_animal_mappings[item_code]
milkegg_code = meat_milkeggs_mappings[item_code]
else:
print item_code,"is an invalid item code for get_livestock_stats"
raise ValueError
except KeyError:
is_milkegg_animal = False
mult = 1000.0 if milkegg_code in egg_codes else 1.0
# Get animal stock
spec = {'year':{'$in':[year-1,year,year+1]},'countrycode':country_code,'itemcode':animal_code}
fields = {'year':1,'value':1}
qry,f = find(table_productionlivestock,spec,fields)
(stocks,last_stocks,next_stocks) = (0.0, 0.0, 0.0)
for rec in qry:
if rec['year']==year:
stocks = mult*rec['value']
elif rec['year']==year+1:
next_stocks = mult*rec['value']
elif rec['year']==year-1:
last_stocks = mult*rec['value']
#Get live animal import/export
cc = country_code if country_code!=china_producing_code else china_trade_code
spec = {'year':{'$in':[year-1,year,year+1]},'countrycode':cc,'itemcode':animal_code,'elementcode':{'$in':import_codes+export_codes}}
fields = {'year':1,'elementcode':1,'value':1}
qry,f = find(table_tradeliveanimals,spec,fields)
(trade,last_trade,next_trade) = (0.0,0.0,0.0)
for rec in qry:
if rec['elementcode'] in import_codes:
if rec['year']==year:
trade += mult*rec['value']
elif rec['year']==year+1:
next_trade += mult*rec['value']
elif rec['year']==year-1:
last_trade += mult*rec['value']
elif rec['elementcode'] in export_codes:
if rec['year']==year:
trade -= mult*rec['value']
elif rec['year']==year+1:
next_trade -= mult*rec['value']
elif rec['year']==year-1:
last_trade -= mult*rec['value']
# Domestic stock after trade
domestic = stocks+trade
last_domestic = last_stocks+last_trade
next_domestic = next_stocks+next_trade
# Get number of animals slaughtered
spec = {'year':{'$in':[year-1,year,year+1]},'countrycode':country_code,'itemcode':meat_code,'elementgroup':producing_animals_group}
fields = {'year':1,'value':1}
qry,f = find(table_productionlivestockprimary,spec,fields)
(slaughtered,next_slaughtered,last_slaughtered) = (0.0,0.0,0.0)
for rec in qry:
if rec['year']==year:
slaughtered = mult*rec['value']
elif rec['year']==year+1:
next_slaughtered = mult*rec['value']
elif rec['year']==year-1:
last_slaughtered = mult*rec['value']
# No data condition
if stocks==0 or next_domestic==0:
aggregate_level+=1
region_code = get_country_region(country_code,aggregate_level)
#flag.extend(['A',str(region_code)])#'A'+str(region_code)
flag = 'A'
return get_livestock_stats(org_year,region_code,item_code,flag,org_year,next_dir,aggregate_level)
# We can stop here if related animal is meat-only
if not is_milkegg_animal:
annual_stocks1 = slaughtered+next_stocks
annual_stocks2 = stocks
annual_stocks = max([annual_stocks1,annual_stocks2])
births1 = annual_stocks - domestic
last_survivors = last_domestic - last_slaughtered
births2 = stocks - last_survivors
births = max([births1,births2])
offtake_rate = slaughtered/stocks
production,f = get_livestockprimary_production(year,country_code,meat_code,imports=False,exports=False,cull=True)
production_T = production['T']
carcass_weight = production_T/slaughtered if slaughtered!=0 else get_carcass_weight(year,country_code,animal_code)
return {
'stocks':annual_stocks,
'meat_animals':annual_stocks,
'meat_animals_ML':annual_stocks,
'meat_animals_P':0,
'producing_animals_T':0,
'producing_animals_ML':0,
'producing_animals_P':0,
'births':births,
'meat_births':births,
'dairyegg_births':0,
'old_maids':0,
'slaughtered':slaughtered,
'offtake_rate':offtake_rate,
'offtake_rate_ML':offtake_rate,
'offtake_rate_P':0,
'carcass_weight':carcass_weight,
'carcass_weight_ML':carcass_weight,
'carcass_weight_P':0,
'cull':0,
'cull_rate':0
},flag
# Get number of producing animals
spec = {'year':{'$in':[year-1,year,year+1]},'countrycode':country_code,'itemcode':milkegg_code,'elementgroup':producing_animals_group}
fields = {'year':1,'value':1}
qry,f = find(table_productionlivestockprimary,spec,fields)
(producing,next_producing,last_producing) = (0.0,0.0,0.0)
for rec in qry:
if rec['year']==year:
producing = mult*rec['value']
elif rec['year']==year+1:
next_producing = mult*rec['value']
elif rec['year']==year-1:
last_producing = mult*rec['value']
# Here's the meat
if milkegg_code in milk_codes:
survivors = domestic - slaughtered
next_births = next_stocks - survivors
next_dairy_share = next_producing/next_domestic
next_dairy_births = next_births*next_dairy_share
next_old_maids = next_producing - next_dairy_births
cull = producing - next_old_maids
cull_rate = cull/producing if producing!=0 else 0.0
last_survivors = last_domestic - last_slaughtered
births = stocks - last_survivors
dairyegg_share = producing/domestic
dairyegg_births = births*dairyegg_share
meat_births = births - dairyegg_births
old_maids = producing - dairyegg_births
annual_stocks = stocks
off_take_rate = (slaughtered-cull)/(stocks - producing) if (stocks - producing)>0 else 0.0
elif milkegg_code in egg_codes:
annual_stocks = slaughtered+next_stocks
dairyegg_share = producing/annual_stocks if annual_stocks>0 else 1.0
births = annual_stocks - domestic
dairyegg_births = births*dairyegg_share
next_old_maids = next_producing - dairyegg_births
cull = producing-next_old_maids
cull_rate = cull/producing if producing!=0 else 0.0
last_annual_stocks = last_slaughtered+stocks
last_dairyegg_share = last_producing/last_annual_stocks
last_births = last_annual_stocks - last_domestic
last_dairyegg_births = last_births*last_dairyegg_share
old_maids = producing - last_dairyegg_births
meat_births = births - dairyegg_births
off_take_rate = 1
else:
raise ValueError
if cull_rate < 0:
cull_rate = 0.0
elif cull_rate > 1:
cull_rate = 1.0
yr = year-1970 #Bouwman et al. (2005) data starts at 1970, but the quadratic params a,b,c are fitted to the shifted data where 1970 -> 0
region_code = get_country_region(country_code)
spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':meat_code}
fields = {'a':1,'b':1,'c':1}
qry2,f2 = find(table_systemanimalfractions,spec,fields,sort=[('aggregatecode',-1)])
rec2 = qry2.next()
MLfrac_animals = rec2['a']*yr*yr + rec2['b']*yr + rec2['c'] #fraction of animals from mixed+landless systems
qry2,f2 = find(table_systemslaughterfractions,spec,fields,sort=[('aggregatecode',-1)])
rec2 = qry2.next()
MLfrac_slaughter = rec2['a']*yr*yr + rec2['b']*yr + rec2['c'] #fraction of animals from mixed+landless systems
production,f = get_livestockprimary_production(year,country_code,meat_code,imports=False,exports=False,cull=True)
production_T = production['T']
production_ML = production['ML']
production_P = production['P']
slaughtered_T = slaughtered - cull
slaughtered_ML = MLfrac_slaughter*slaughtered_T
slaughtered_P = (1-MLfrac_slaughter)*slaughtered_T
carcass_weight = production_T/slaughtered_T if slaughtered_T!=0 else get_carcass_weight(year,country_code,animal_code)
carcass_weight_ML = production_ML/slaughtered_ML if slaughtered_ML!=0 else carcass_weight
carcass_weight_P = production_P/slaughtered_P if slaughtered_P!=0 else 0.0
meat_animals_T = annual_stocks - producing
meat_animals_ML = MLfrac_animals*meat_animals_T
meat_animals_P = (1-MLfrac_animals)*meat_animals_T
offtake_rate = slaughtered_T/meat_animals_T if meat_animals_T!=0 else 0.0
offtake_rate_ML = slaughtered_ML/meat_animals_ML if meat_animals_ML!=0 else 0.0
offtake_rate_P = slaughtered_P/meat_animals_P if meat_animals_P!=0 else 0.0
producing_ML = MLfrac_animals*producing
producing_P = (1-MLfrac_animals)*producing
stats = {
'stocks':annual_stocks,
'meat_animals':meat_animals_T,
'meat_animals_ML':meat_animals_ML,
'meat_animals_P':meat_animals_P,
'producing_animals_T':producing,
'producing_animals_ML':producing_ML,
'producing_animals_P':producing_P,
'births':births,
'meat_births':meat_births,
'dairyegg_births':dairyegg_births,
'old_maids':old_maids,
'slaughtered':slaughtered_T,
'offtake_rate':offtake_rate,
'offtake_rate_ML':offtake_rate_ML,
'offtake_rate_P':offtake_rate_P,
'carcass_weight':carcass_weight,
'carcass_weight_ML':carcass_weight_ML,
'carcass_weight_P':carcass_weight_P,
'cull':cull,
'cull_rate':cull_rate
}
return stats,flag
def get_livestockprimary_yield(year,country_code,lp_code,imports=True,exports=True,cull=False,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given a year country_code and primary livestock item code, return the yield as tonnes per hectare of land
    used to produce the primary livestock item.

    Returns (yield, flag). On success yield is a dict keyed by agri-system:
    'T' total, 'C' cropland, 'ML' mixed+landless, 'P' pastoral, and flag is ''.
    When no harvested area can be found anywhere the return is (0.0, "No data")
    -- note the scalar, not a dict, so callers must handle both shapes.

    If from_db is True the precomputed values are read straight from
    table_livestockyields (elementcodes -5419/-5416/-5417/-5418 map to
    T/C/P/ML respectively) and the remaining keyword arguments are ignored.
    Otherwise the yield is derived as production/area_harvested, recursing to
    later years (next_dir=1), then earlier years (next_dir=-1), then
    progressively coarser regional aggregates (aggregate_level) when no
    harvested area is reported.

    NOTE(review): flag=[] is a mutable default argument; this function only
    rebinds it (never mutates), but the pattern is fragile.
    """
    if from_db:
        (lpy,lpy_flag) = ({"T":0.0,"P":0.0,"ML":0.0,"C":0.0},"No data")
        spec = {'year':year,'countrycode':country_code,'itemcode':lp_code}#,'elementcode':sys_code}
        fields = {'elementcode':1,'value':1,'flag':1}
        qry,f = find(table_livestockyields,spec,fields)
        for rec in qry:
            if rec['elementcode']==-5419:
                lpy['T']=rec['value']
            elif rec['elementcode']==-5416:
                lpy['C']=rec['value']
            elif rec['elementcode']==-5417:
                lpy['P']=rec['value']
            elif rec['elementcode']==-5418:
                lpy['ML']=rec['value']
            else:
                print "Invalid elementcode in livestockareaharvested"
                raise ValueError
            lpy_flag = '' #at least one record was found, so clear the "No data" flag
        return lpy,lpy_flag
    if org_year is None:
        org_year = year #remember the originally requested year for the fallback recursions below
    """if sector=="total":
        sys_code = -5511
    elif sector=="crop":
        sys_code = -5512
    elif sector=="pasture":
        sys_code = -5513
    """
    production,lpp_flag = get_livestockprimary_production(year,country_code,lp_code=lp_code,imports=imports,exports=exports,cull=cull)
    #production_T = production['T']
    #production_ML = production['ML']
    #production_P = production['P']
    #if lpp_flag!='':
    #    flag.extend(["P",lpp_flag,"P"])
    #production = productions["T"][lp_code]
    area_harvested,ah_flag = get_livestockprimary_area_harvested(year,country_code,lp_code,from_db=True)
    area_harvested.update((k,float(v)) for k,v in area_harvested.items()) #because scientific notation is stored as unicode in mongo
    #area_harvested_T = area_harvested['T']
    #area_harvested = area_harvested["total"]
    #if ah_flag!='':
    #    flag.extend(["Ah",ah_flag,"Ah"])
    no_harvest = sum(area_harvested.values())==0
    #Fallback cascade: no harvested area this year -> try later years, then earlier years, then aggregates.
    if no_harvest and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_livestockprimary_yield(year+1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_harvest and year==max_year and org_year!=min_year:
        #ran out of later years -- restart the search backwards from the original year
        next_dir = -1
        #flag = list(set(flag)-set(['Ny']))#flag.translate(None,'Ny')+"Py"
        #flag.append('Py')
        return get_livestockprimary_yield(org_year-1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_harvest and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append('Py')
        return get_livestockprimary_yield(year-1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_harvest and country_code!=world_code:
        #no year had data -- fall back to the enclosing regional aggregate (restarting from org_year)
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])#'A'+str(region_code)
        return get_livestockprimary_yield(org_year,region_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_harvest:
        return 0.0,"No data" #even the world aggregate has nothing; NOTE the scalar return shape
    #print year,country_code,lp_code
    yld_T = production['T']/area_harvested['T'] if area_harvested['T']!=0 else 0.0
    yld_C = production['ML']/area_harvested['C'] if area_harvested['C']!=0 else 0.0 #NOTE(review): numerator is ML production over cropland ('C') area; production has no 'C' key -- confirm intent
    yld_ML = production['ML']/(area_harvested['P_ML']+area_harvested['C']) if (area_harvested['P_ML']+area_harvested['C'])!=0 else 0.0
    yld_P = production['P']/area_harvested['P_P'] if area_harvested['P_P']!=0 else 0.0
    """try:
        yld = production/float(area_harvested) if area_harvested!=0 else float('inf')
    except TypeError:
        print year,country_code,lp_code,area_harvested
        raise
    """
    #flag = ''.join(flag)
    flag = ''
    yld = {'T':yld_T,'C':yld_C,'ML':yld_ML,'P':yld_P}
    return yld,flag
def get_feed_ssr(year,country_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given year and country_code, return the self-sufficiency ratio of all feed components. This
    returns a single value for all components weighted according to the component's proportion in the feed.

    Returns (feed_ssr, flag); flag is '' on success, or 'No data' when reading
    from the db and no record exists.

    The weighted SSR is computed from the feed quantities of the first item
    code in try_codes that yields a non-zero result. If nothing non-zero is
    found, the call recurses on progressively coarser regional aggregates
    (stopping at world_code).

    NOTE(review): flag=[] is a mutable default argument (only rebound here).
    """
    if from_db:
        spec={'year':year,'countrycode':country_code}
        fields={'value':1,'flag':1}
        rec,f = find_one(table_feedssr,spec,fields)
        (feed_ssr,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,'No data')
        return feed_ssr,flag
    if org_year is None:
        org_year = year
    try_codes = [867,1058,1035,882] #candidate primary livestock item codes tried in order -- TODO confirm which items these map to
    feed_ssr = 0.0
    for lp_code in try_codes:
        feed_quantities,fq_flag = get_feed_quantities(year,country_code,lp_code)
        total_feed = sum(feed_quantities.values())
        if total_feed==0:
            continue
        feed_props = {k:v/total_feed for k,v in feed_quantities.iteritems()} #NOTE(review): unused; prop below recomputes the same ratio
        for k,v in feed_quantities.iteritems():
            prop = v/total_feed
            ssr,ssr_flag = get_ssr(year,country_code,k,from_db=True)
            #print k,v,prop,ssr
            ssr = float(ssr)
            feed_ssr += prop*ssr if abs(ssr)<5 else 0.0 #the <5 condition just drops anomalies
        if feed_ssr!=0:
            break
    no_data = feed_ssr==0
    """if no_data and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_feed_ssr(year+1,country_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and year==max_year and org_year!=min_year:
        next_dir = -1
        #flag = list(set(flag)-set('Ny'))#flag.translate(None,'Ny')+"Py"
        #flag.append("Py")
        return get_feed_ssr(org_year-1,country_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append("Py")
        return get_feed_ssr(year-1,country_code,flag,org_year,next_dir,aggregate_level)
    elif no_data:
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])
        return get_feed_ssr(org_year,region_code,flag,org_year,next_dir,aggregate_level)
    """
    if no_data and country_code!=world_code:
        #fall back to the enclosing regional aggregate, restarting from org_year
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])
        return get_feed_ssr(org_year,region_code,flag,org_year,next_dir,aggregate_level)
    flag = ''
    return feed_ssr,flag
#feed_items_in_production
def get_ssr(year,country_code,item_code,incl_exports=True,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given a year and country code and item_code, return the self-sufficiency ratio (i.e. production/domestic_supply).
    item_code may be from either commodity balance or production, but the corresponding dataset will be used. If in doubt
    be sure to use the production item codes (e.g. 56 instead of 2514 for maize)
    Note: animal codes are mapped to corresponding meat codes
    To do: Handle item_code mapping automatically. Need dictionary of mappings for all items.

    Returns (ssr, flag). With from_db=True the precomputed ratio is read from
    table_ssr, else it is computed from commodity balances, falling back to
    production + net trade when the country is not a balance reporter; if still
    nothing is reported, the call recurses through later years, earlier years,
    then regional aggregates.

    NOTE(review): flag=[] is a mutable default argument (only read/rebound here).
    """
    if item_code in [1158,1150]: #This is a hack. Should instead delete this condition and delete all records in CommodityTrees.csv where source is 1158.
        return 0.0,'No data'
    if item_code in livestock_codes:
        item_code = livestock_reverse_mappings[item_code] #map live-animal code to its meat code
    if from_db:
        spec={'year':year,'countrycode':country_code,'itemcode':item_code}
        fields={'value':1,'flag':1}
        rec,f = find_one(table_ssr,spec,fields)
        (ssr,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,'No data')
        return ssr,flag
    if org_year is None:
        org_year = year
    ssr = None
    reported = False
    #First try the commodity balance: ssr = production/domestic_supply.
    #Relies on the ascending elementcode sort so domestic supply is read before
    #production -- TODO confirm domestic_supply_code < production_code.
    spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':{'$in':[domestic_supply_code,production_code]}}
    fields = {'elementcode':1,'value':1}
    qry = table_commoditybalance.find(spec,fields,sort=[('elementcode',1)])
    for rec in qry:
        reported = True
        if rec['elementcode']==domestic_supply_code:
            ssr = rec['value']
        elif rec['elementcode']==production_code and ssr is not None:
            ssr = 1.0*rec['value']/ssr
    if not reported: #not a reporter
        #Fallback: approximate ssr = production/(production + imports - exports)
        #ssr temporarily accumulates net imports until the final division below.
        ssr = 0.0
        if item_code in crop_codes:
            table = table_cropproduction
        elif item_code in livestockprimary_codes:
            table = table_livestockproductionimportexport
        #NOTE(review): if item_code is in neither set, `table` is unbound and the
        #qry below raises UnboundLocalError -- presumably callers guarantee membership.
        trade_item_code = item_code
        trade_item_conv = 1.0
        if item_code in trade_to_production_mappings:
            trade_item_code = trade_to_production_mappings[item_code][0]
            trade_item_conv = trade_to_production_mappings[item_code][1]
        if item_code in fodder_to_crop_mappings:
            item_code = fodder_to_crop_mappings[item_code]
            trade_item_code = item_code
        cc = country_code if country_code!=china_producing_code else china_trade_code #trade data is reported under a different China code
        spec = {'year':year,'countrycode':cc,'itemcode':trade_item_code,'elementcode':{'$in':import_codes+export_codes}}
        fields = {'elementcode':1,'value':1}
        qry = table_tradecropslivestock.find(spec,fields,sort=[('elementcode',-1)])
        for rec in qry:
            reported = True
            if rec['elementcode'] in import_codes:
                ssr += trade_item_conv*float(rec['value'])
            elif rec['elementcode'] in export_codes and incl_exports:
                ssr -= trade_item_conv*float(rec['value'])
        spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':{'$in':[production_code,-5511]}}#the -5511 is total production in livestockproduction
        fields = {'elementcode':1,'value':1}
        qry = table.find(spec,fields,sort=[('elementcode',-1)])
        for rec in qry:
            reported = True
            if rec['value']+ssr!=0:
                ssr = rec['value']/(rec['value']+ssr) #production/(production + net imports)
            elif rec['value']+ssr==0 and rec['value']!=0:
                ssr = float('inf')
            else:
                ssr = 0.0
    #if no data is reported
    if not reported and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_ssr(year+1,country_code,item_code,incl_exports,flag,org_year,next_dir,aggregate_level)
    elif not reported and year==max_year and org_year!=min_year:
        #ran out of later years -- search backwards from the original year
        next_dir = -1
        #flag = list(set(flag)-set('Ny'))#flag.translate(None,'Ny')+"Py"
        #flag.append("Py")
        return get_ssr(org_year-1,country_code,item_code,incl_exports,flag,org_year,next_dir,aggregate_level)
    elif not reported and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append("Py")
        return get_ssr(year-1,country_code,item_code,incl_exports,flag,org_year,next_dir,aggregate_level)
    elif not reported:
        #no year had data -- fall back to the enclosing regional aggregate
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])
        return get_ssr(org_year,region_code,item_code,incl_exports,flag,org_year,next_dir,aggregate_level)
    #if ''.join(flag)!='':
    #    flag = [str(item_code)]+flag
    flag = ''.join(flag)
    return ssr,flag
def get_feed_quantities(year,country_code,lp_code,flag=None,org_year=None,next_dir=0,aggregate_level=0,domestic=True):
    """
    Given a year country_code and primary livestock item code, return a dictionary with the key
    being the feed component code (e.g. 56 for maize, etc.) and the value being the quantity of that
    item in the feed used to produce the primary livestock item. The quantities may be scaled by the
    corresponding self-sufficiency ratios (i.e. if domestic=True).

    Returns (feed_quantities, flag): flag is '' when commodity balances were
    used, 'P' when quantities had to be derived from production + trade data,
    and 'No data' when neither source has records for this year/country.

    To do : could to the get next / get previous thing
    To do : create a db collection and modify calls to read from db.
    """
    #flag previously defaulted to a shared mutable [] which this function mutates
    #(flag += "P" below), so the "P" marker accumulated across calls that used the
    #default; a None sentinel with a fresh list fixes that while keeping the interface.
    if flag is None:
        flag = []
    feed_share,fs_flag = get_feed_shares(year,country_code,lp_code,from_db=True)
    feed_share = float(feed_share)
    #feed_share = feed_shares[lp_code]
    #if fs_flag!='':
    #    flag.extend(["FS",fs_flag,"FS"])
    #print year,country_code,lp_code
    feed_quantities = {v:0.0 for v in feed_items_in_production}
    fields = {'itemcode':1,'elementcode':1,'value':1}
    #Prefer commodity-balance feed data; otherwise fall back to production+trade data.
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_items_in_balance}}#,'elementcode':feed_code}
    is_balanced = table_commoditybalance.find(spec,fields).count()
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_items_in_production},'elementcode':production_code}
    is_produced = table_productioncrops.find(spec,fields).count()
    if is_balanced:
        spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_items_in_balance},'elementcode':feed_code}
        qry,f = find(table_commoditybalance,spec,fields)
        for rec in qry:
            commodity_item_code = rec['itemcode']
            production_item_code = feed_balance_production_mappings[commodity_item_code]
            feed_quantities[production_item_code] = rec['value']*feed_share
            if domestic:
                ssr,ssr_flag = get_ssr(year,country_code,production_item_code,from_db=False)
                if ssr>1 or ssr<0: #clamp out-of-range self-sufficiency ratios
                    ssr = 1.0
                feed_quantities[production_item_code] *= ssr
                #if ssr_flag!='':
                #    flag.extend(["SSR",ssr_flag,"SSR"])
    elif is_produced:
        flag += "P" #mark that production (not balance) data was used; extends the list with 'P'
        cc = country_code if country_code!=china_producing_code else china_trade_code #trade data is reported under a different China code
        spec = {'year':year,'countrycode':cc,'itemcode':{'$in':feed_items_in_production},'elementcode':{'$in':import_codes+export_codes}}
        qry,f = find(table_tradecropslivestock,spec,fields)
        for rec in qry:
            item_code = rec['itemcode']
            element_code = rec['elementcode']
            if element_code in import_codes:
                feed_quantities[item_code]+=float(rec['value'])
            elif element_code in export_codes:
                feed_quantities[item_code]-=float(rec['value'])
        spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_items_in_production},'elementcode':production_code}
        qry,f = find(table_productioncrops,spec,fields)
        for rec in qry:
            item_code = rec['itemcode']
            fdr,fdr_flag = get_feed_to_domestic_ratios(year,country_code,item_code)
            feed_quantities[item_code]+=rec['value']*float(fdr)
            feed_quantities[item_code]*=feed_share
            if domestic:
                ssr,ssr_flag = get_ssr(year,country_code,item_code,from_db=False)
                if ssr>1 or ssr<0:
                    ssr = 1.0
                feed_quantities[item_code] *= ssr
                #if ssr_flag!='':
                #    flag.extend(["SSR",ssr_flag,"SSR"])
    else:
        flag = ["No data"]
    flag = ''.join(flag)
    return feed_quantities,flag
def get_feed_to_domestic_ratios(year,country_code,crop_code=None,flag=[],org_year=None,next_dir=0,aggregate_level=0):
    """
    Given a year and countrycode, return a dictionary with the key being a feed component code (e.g. 56 for maize, etc.)
    and the value being the fraction of the domestic supply represented by that feed.

    Returns (ratios_dict, flag) -- or (scalar, flag) when crop_code is given, in
    which case the single precomputed ratio is read from table_feedtodomesticratio
    and the remaining arguments are ignored.

    When no commodity balance exists for the year/country, the call recurses to
    later years, then earlier years, then regional aggregates.

    NOTE(review): flag=[] is a mutable default argument (only rebound here).
    """
    if crop_code is not None:
        spec = {'year':year,'countrycode':country_code,'itemcode':crop_code}
        fields = {'value':1,'flag':1}
        rec,f = find_one(table_feedtodomesticratio,spec,fields)
        (fdr,fdr_flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,'No data')
        return fdr,fdr_flag
    if org_year is None:
        org_year = year
    feed_to_domestic_ratios = {v:0.0 for v in feed_items_in_production}
    #Relies on the elementcode sort so domestic supply is read before feed for each
    #item -- TODO confirm domestic_supply_code < feed_code.
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_items_in_balance},'elementcode':{'$in':[feed_code,domestic_supply_code]}}
    fields = {'itemcode':1,'elementcode':1,'value':1}
    qry = table_commoditybalance.find(spec,fields,sort=[('itemcode',1),('elementcode',1)])
    for rec in qry:
        item_code = feed_balance_production_mappings[rec['itemcode']]
        if rec['elementcode']==domestic_supply_code:
            feed_to_domestic_ratios[item_code] = rec['value'] if rec['value']!='' else 0.0 #for some reason, some values are empty strings in commodity balance.
        elif rec['elementcode']==feed_code and feed_to_domestic_ratios[item_code] != 0:
            feed_to_domestic_ratios[item_code] = rec['value']/feed_to_domestic_ratios[item_code] #feed/domestic_supply
    #get rid of entries where no feed is reported (a ratio >1 means the feed division never happened)
    for k,v in feed_to_domestic_ratios.iteritems():
        feed_to_domestic_ratios[k] = v if v<=1.0 else 0.0
    #if no production is reported
    no_balance = all(v==0 for v in feed_to_domestic_ratios.values())
    if no_balance and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_feed_to_domestic_ratios(year+1,country_code,crop_code,flag,org_year,next_dir,aggregate_level)
    elif no_balance and year==max_year and org_year!=min_year:
        #ran out of later years -- search backwards from the original year
        next_dir = -1
        #flag = list(set(flag)-set(['Ny']))#flag.translate(None,'Ny')+"Py"
        #flag.append("Py")
        return get_feed_to_domestic_ratios(org_year-1,country_code,crop_code,flag,org_year,next_dir,aggregate_level)
    elif no_balance and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append("Py")
        return get_feed_to_domestic_ratios(year-1,country_code,crop_code,flag,org_year,next_dir,aggregate_level)
    elif no_balance:
        #no year had data -- fall back to the enclosing regional aggregate
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])
        return get_feed_to_domestic_ratios(org_year,region_code,crop_code,flag,org_year,next_dir,aggregate_level)
    #flag = ''.join(flag)
    flag = ''
    return feed_to_domestic_ratios,flag
def get_processed_quantity(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0):
    """
    Get the quantity of item_code that was reported by country_code to be used for processing in the given year.

    Looks up the commodity-balance record for the commodity code that
    item_code maps to, restricted to the processing element codes, and
    returns its value (0.0 when no record exists). The trailing keyword
    arguments are accepted for signature parity with the sibling getters
    but are not used here.
    """
    query = {
        'year':year,
        'countrycode':country_code,
        'itemcode':primary2commodity_mappings[item_code],
        'elementcode':{'$in':processed_codes},
    }
    record,_f = find_one(table_commoditybalance,query,{'value':1})
    if record is None:
        return 0.0
    return record['value']
def get_livestockprimary_production(year,country_code,lp_code=None,imports=True,exports=True,cull=True,flag=[],org_year=None,next_dir=0,aggregate_level=0,**kwargs):
    """
    Given a year and country code return a dictionary with the key being the primary livetock
    commodity code and the value being the production adjusted for import/export of liveanimals
    and for culling dairy/egg producing animals.

    Returns (productions, flag). With lp_code given, productions is a single
    {'T','ML','P'} dict read from the precomputed table selected by the
    imports/exports/cull switches (elementcodes -5511/-5512/-5513 map to
    T/ML/P). Otherwise productions is {'T':..,'ML':..,'P':..} where each value
    is a dict keyed by primary livestock item code; flag is '' on success or
    'No data' when no production exists anywhere in the year cascade.

    NOTE(review): flag=[] is a mutable default argument (only rebound here).
    """
    if lp_code is not None:
        #pick the precomputed table matching the requested adjustments
        if imports and exports and cull:
            table = table_livestockproductionimportexportcull
        elif imports and exports:
            table = table_livestockproductionimportexport
        elif exports:
            table = table_livestockproductionexport
        else:
            table = table_livestockproductionnoadj
        (lpp_production,lpp_flag) = ({'T':0.0,'ML':0.0,'P':0.0},"No data")
        spec = {'year':year,'countrycode':country_code,'itemcode':lp_code}#,'elementcode':sys_code}
        fields = {'elementcode':1,'value':1,'flag':1}
        qry,f = find(table,spec,fields)
        for rec in qry:
            if rec['elementcode']==-5511:
                lpp_production['T'] = rec['value']
            elif rec['elementcode']==-5512:
                lpp_production['ML'] = rec['value']
            elif rec['elementcode']==-5513:
                lpp_production['P'] = rec['value']
            lpp_flag = '' #at least one record was found, so clear the "No data" flag
        return lpp_production,lpp_flag
    """if lp_code is not None:
        if imports and exports and cull:
            table = table_livestockproductionimportexportcull
        elif imports and exports:
            table = table_livestockproductionimportexport
        elif exports:
            table = table_livestockproductionexport
        else:
            table = table_livestockproductionnoadj
        spec = {'year':year,'countrycode':country_code,'itemcode':lp_code,'elementcode':sys_code}
        fields = {'value':1,'flag':1}
        rec,f = find_one(table,spec,fields)
        (lpp_production,lpp_flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return lpp_production,lpp_flag
    """
    if org_year is None:
        org_year = year
    all_codes = bovine_meat_codes+ovine_meat_codes+milk_codes+pig_meat_codes+poultry_meat_codes+egg_codes
    meat_codes = bovine_meat_codes+ovine_meat_codes+pig_meat_codes+poultry_meat_codes
    animal_codes = [livestock_mappings[code] for code in meat_codes]
    milkegg_codes = milk_codes+egg_codes
    productions = {code:0 for code in all_codes}
    productions_ML = {code:0 for code in all_codes}
    productions_P = {code:0 for code in all_codes}
    #get production of primary livestock products
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':all_codes},'elementcode':production_code}
    fields = {'itemcode':1,'value':1}
    qry,f = find(table_productionlivestockprimary,spec,fields)
    for rec in qry:
        productions[rec['itemcode']] = rec['value'] #productions now holds productions
    #print "Raw productions",productions
    #adjust meat productions for import/export of live animals
    if imports or exports:
        cc = country_code if country_code!=china_producing_code else china_trade_code #trade data is reported under a different China code
        spec = {'year':year,'countrycode':cc,'itemcode':{'$in':animal_codes},'elementcode':{'$in':import_codes+export_codes}}
        fields = {'itemcode':1,'elementcode':1,'value':1}
        qry,f = find(table_tradeliveanimals,spec,fields)
        for rec in qry:
            animal_code = rec['itemcode']
            carcass_weight = get_carcass_weight(year,country_code,animal_code)
            item_code = livestock_reverse_mappings[animal_code]
            value = carcass_weight*rec['value'] #head count converted to meat tonnage
            if rec['elementcode'] in import_codes and imports:
                productions[item_code]-=value
            elif rec['elementcode'] in export_codes and exports:
                productions[item_code]+=value #productions now holds productions adjusted for import/export
    #print "Import/Export adjustments",productions
    if cull:
        #adjust meat productions for culling of dairy/egg animals.
        spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':milkegg_codes},'elementgroup':producing_animals_group}
        fields = {'itemcode':1,'elementcode':1,'value':1}
        qry,f = find(table_productionlivestockprimary,spec,fields)
        for rec in qry:
            conv = 1000.0 if rec['elementcode'] in khead_codes else 1.0 #some elements report thousands of head
            value = conv*rec['value']
            meat_code = milkeggs_meat_mappings[rec['itemcode']]
            animal_code = milkeggs_animal_mappings[rec['itemcode']]
            carcass_weight = get_carcass_weight(year,country_code,animal_code)
            cull_rate = get_livestock_stats(year,country_code,rec['itemcode'])[0]['cull_rate']
            excess = value*carcass_weight*cull_rate
            productions[meat_code]-=excess #productions now holds productions adjusted for culling.
    #print "Culling adjustments",productions
    region_code = get_country_region(country_code)
    #remove negative values
    for k,v in productions.iteritems():
        productions[k] = v if v>0 else 0.0
    #if no production is reported
    no_production = all(v==0 for v in productions.values())
    #########Following maybe add back in optionally ############
    if no_production and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_livestockprimary_production(year+1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_production and year==max_year and org_year!=min_year:
        #ran out of later years -- search backwards from the original year
        next_dir = -1
        #flag = list(set(flag)-set(['Ny']))
        #flag.append('Py')#flag.translate(None,'Ny')+"Py"
        return get_livestockprimary_production(org_year-1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_production and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append("Py")
        return get_livestockprimary_production(year-1,country_code,lp_code,imports,exports,cull,flag,org_year,next_dir,aggregate_level)
    elif no_production:
        #unlike the yield/ssr getters, this one returns zeros rather than
        #recursing to regional aggregates
        productions = {"T":productions,"ML":productions_ML,"P":productions_P}
        flag = 'No data'
        return productions,flag
    #split productions into ML and P agri-system
    yr = year-1970 #Bouwman et al. (2005) data starts at 1970, but the quadratic params a,b,c are fitted to the shifted data where 1970 -> 0
    for item_code in all_codes:
        spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':item_code}
        fields = {'a':1,'b':1,'c':1}
        qry,f = find(table_systemproductionfractions,spec,fields,sort=[('aggregatecode',-1)])
        rec = qry.next()
        MLfrac = rec['a']*yr*yr + rec['b']*yr + rec['c'] #fraction of production derived from mixed+landless systems
        productions_ML[item_code] = MLfrac*productions[item_code]
        productions_P[item_code] = (1-MLfrac)*productions[item_code]
    if no_production:
        #NOTE(review): unreachable -- every no_production case returned above
        flag = ["No data"]
    productions = {"T":productions,"ML":productions_ML,"P":productions_P}
    #flag = ''.join(flag)
    flag = ''
    return productions,flag
def get_feed_shares(year,country_code,lp_code=None,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given a year and country code return a dictionary with the key being the primary livetock
    commodity code and the value being the fraction of the country's feed assigned to that commodity.

    Returns (shares, flag) where the shares sum to 1, or a single precomputed
    (share, flag) from table_feedshares when from_db=True and lp_code is given.

    Shares are derived from mixed+landless ('ML') production weighted by each
    item's feed-conversion rate and feed/food fraction. When all weighted
    shares are zero the call recurses on progressively coarser regional
    aggregates -- NOTE(review): there is no world_code stopping condition here;
    presumably the world aggregate always has data. flag=[] is a mutable
    default argument (only rebound here).
    """
    if from_db and lp_code is not None:
        spec = {'year':year,'countrycode':country_code,'itemcode':lp_code}
        fields = {'value':1,'flag':1}
        rec,f = find_one(table_feedshares,spec,fields)
        (feed_share,fs_flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return feed_share,fs_flag
    if org_year is None:
        org_year = year
    all_codes = bovine_meat_codes+ovine_meat_codes+milk_codes+pig_meat_codes+poultry_meat_codes+egg_codes
    meat_codes = bovine_meat_codes+ovine_meat_codes+pig_meat_codes+poultry_meat_codes
    animal_codes = [livestock_mappings[code] for code in meat_codes]
    milkegg_codes = milk_codes+egg_codes
    shares,s_flag = get_livestockprimary_production(year,country_code)
    shares = shares["ML"] #just grab the ML
    #if s_flag!='':
    #    flag.extend(["P",s_flag,"P"])
    shares[1124] = 0.0 #horses mules and asses feed is negligible (reference!!!)
    shares[1097] = 0.0
    shares[1108] = 0.0
    region_code = get_country_region(country_code)
    #get feed quantities
    for item_code in all_codes:
        feed_conversion = get_feed_conversion_relative(year,region_code,item_code)
        #itemtypecode indexes table_feedfoodfractions by animal product category
        if item_code in bovine_meat_codes:
            itemtypecode = 0
        elif item_code in milk_codes:
            itemtypecode = 1
        elif item_code in ovine_meat_codes:
            itemtypecode = 2
        elif item_code in pig_meat_codes:
            itemtypecode = 3
        elif item_code in poultry_meat_codes:
            itemtypecode = 4
        elif item_code in egg_codes:
            itemtypecode = 5
        else:
            print "get_feed_shares",year,country_code,item_code
            raise ValueError
        spec = {'aggregatecode':region_code,'itemtypecode':itemtypecode}
        fields = {'value':1}
        rec,f = find_one(table_feedfoodfractions,spec,fields)
        food_frac = rec['value']
        shares[item_code] = shares[item_code]*feed_conversion*food_frac #shares now holds quantity of feed
    #normalize the shares wrt the sum
    s = 1.0*sum(shares.values())
    if s==0:
        #no feed anywhere -- retry on the enclosing regional aggregate
        flag = []#list(set(flag)-set(['Py','Ny'])-set(['P','No data','P']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])
        return get_feed_shares(org_year,region_code,lp_code,flag,org_year,next_dir,aggregate_level)
    shares = {k:v/s for k,v in shares.iteritems()}
    #flag = ''.join(flag)
    flag = ''
    return shares,flag
def get_feed_crop_area(year,country_code,item_code,production_quantity,include_pasture=True):
    """
    Given a year, country code, primary livestock code, and primary livestock item quantity,
    this function returns the area of crop land required to produce the feed required
    to produce the given quantity of primary livestock item.

    Returns (crop_area, ret_flag) where ret_flag concatenates short source
    markers ("TP","TB","I","P","B","Y"...) from each fallback lookup used.

    The feed mix for the region is read from table_feedmixes; each mix
    component's feed tonnage is attributed to domestic production via
    commodity balances when available (or production+trade data otherwise),
    then converted to area by dividing by the component's crop yield.
    """
    if include_pasture:
        print "Including pasture"
    ret_flag = ''
    year = year if year!=2010 else 2009 #band-aid since commodity balances not available for 2010 yet
    region_code = get_country_region(country_code)
    #get feed conversion (dry matter to livestock product) for given year, country and item
    feed_conversion = get_feed_conversion(year,region_code,item_code)
    #print "Feed conversion",feed_conversion
    #convert production_quantity into feed quantity
    feed_quantity = production_quantity*feed_conversion#/0.7 #0.7 is conversion of fresh grain to dry matter https://www.google.ca/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&sqi=2&ved=0CD8QFjAC&url=http%3A%2F%2Fwww.ag.ndsu.edu%2Fextension-aben%2Fdocuments%2Fae905.pdf&ei=lCJCUZycNYTW2AW73YCICg&usg=AFQjCNHbJ2yoaagwMZfa419b3OBOVJcokQ&sig2=13t9lxkISc_MJ3D-jsM1WA
    #print "Feed quantity",feed_quantity
    #get feed compositions for given country and item
    spec = {'aggregatecode':region_code,'itemcode':item_code}
    fields = {'feedcode':1,'value':1}
    qry,flag = find(table_feedmixes,spec,fields)
    #qry = db.feedmixes.find({'aggregatecode':aggregate_code,'itemcode':item_code},{'feedcode':1,'value':1})
    crop_area = 0
    crop_areas = {0:0, 1:0, 2:0, 3:0, 4:0} #per-feedcode breakdown; feedcode 1 appears to be pasture (see the skip below)
    for rec in qry:
        if rec['feedcode']==1 and not include_pasture: #hack until can deal with pasture feed
            continue
        feed_components_balance = feedcode_mappings_balance[rec['feedcode']]
        feed_components_production = feedcode_mappings_production[rec['feedcode']]
        spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_components_production},'elementcode':production_code}
        total_of_components_production,flag = find_sum(table_productioncrops,spec,'value')
        if flag!='':
            ret_flag += "TP"+flag
        #print "Total of components (production)",total_of_components_production
        spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_components_balance},'elementcode':feed_code}
        total_of_components_balance,flag = find_sum(table_commoditybalance,spec,'value')
        #print "SPec",spec
        if flag!='':
            ret_flag +="TB"+flag
        #print "Total of components (balance)",total_of_components_balance
        #print "Feedcode",rec['feedcode']
        #print "-------------------------------"
        for component_balance,component_production in zip(feed_components_balance,feed_components_production):
            #get production,import and feed quantities reported in commodity balances for each component.
            #print "Component",component_balance,component_production
            #if country_code not in balance_reporters or rec['feedcode']==1:
            if total_of_components_balance==0 or rec['feedcode']==1:
                #No commodity balance (or pasture): use production + imports instead.
                #p = db.productioncrops.find_one({'year':year,'countrycode':country_code,'itemcode':component_production,'elementcode':production_code},{'value':1})['value']
                #i = db.tradecropslivestock.find_one({'year':year,'countrycode':country_code,'itemcode':component_production,'elementcode':{'$in':import_codes}},{'value':1})['value']
                #frac1 = (p/(p+i)) #fraction of supply that is domestic
                cc = country_code if country_code!=china_producing_code else china_trade_code
                spec = {'year':year,'countrycode':cc,'itemcode':component_production,'elementcode':{'$in':import_codes}}
                fields = {'value':1}
                rec2,flag2 = find_one(table_tradecropslivestock,spec,fields)
                if flag2!='':
                    ret_flag += "I"+flag2
                i = rec2['value'] if rec2 is not None else 0.0
                #print "Imports",i
                spec = {'year':year,'countrycode':country_code,'itemcode':component_production,'elementcode':production_code}
                fields = {'value':1}
                rec2,flag2 = find_one(table_productioncrops,spec,fields)
                if flag2!='':
                    ret_flag += "P"+flag2
                p = rec2['value'] if rec2 is not None else 0.0
                #print "Production",p
                if rec['feedcode']==1:
                    frac1 = (p/(p+i)) if p!=0 else 1.0 #fraction that is domestic
                    frac2 = (p/total_of_components_production) if total_of_components_production!=0 else 1.0/len(feed_components_production) #fraction of "feed" attributed to this component
                else:
                    frac1 = (p/(p+i)) if p!=0 else 0.0 #fraction that is domestic
                    frac2 = (p/total_of_components_production) if total_of_components_production!=0 else 0.0 #fraction of "feed" attributed to this component
                feed = feed_quantity*rec['value']*frac1
                #NOTE(review): frac2 is computed in this branch but never used (the balance
                #branch below multiplies by frac2) -- confirm whether it was meant to apply here.
                #print "Feed",feed
                """try:
                    spec = {'year':year,'countrycode':country_code,'itemcode':component_production,'elementcode':yield_code}
                    fields = {'value':1}
                    yld = Hg2tonnes*find_one(table_productioncrops,spec,fields,get_next=('year','$gt'))['value']
                except TypeError:
                    continue"""
            #elif country_code in balance_reporters:
            elif total_of_components_balance!=0:
                #Commodity balance available: attribute feed via reported feed element.
                spec = {'year':year,'countrycode':country_code,'itemcode':component_balance,'elementcode':{'$in':import_codes+[production_code,feed_code]}}
                fields = {'elementcode':1,'value':1}
                qry2,flag2 = find(table_commoditybalance,spec,fields)
                if flag2!='':
                    ret_flag += "B"+flag2
                p,f,i = 0,0,0
                for rec2 in qry2:
                    if rec2['elementcode']==production_code:
                        p = rec2['value']
                        #print "Production",p
                    elif rec2['elementcode']==feed_code:
                        f = rec2['value']
                        #print "Feed balance",f
                    else:
                        i = rec2['value']
                        #print "Import",i
                #print year, country_code, component_balance, total_of_components_balance
                frac1 = (p/(p+i)) if p+i!=0 else 0.0 #fraction of supply that is domestic
                frac2 = (f/total_of_components_balance)# if total_of_components!=0 else 0.0#fraction of feed attributed to this component
                feed = feed_quantity*rec['value']*frac1*frac2
                #print "Feed",feed
            else:
                raise ValueError
            yld,yflag = get_yield(year,country_code,component_production)
            if yflag!='':
                ret_flag += "Y"+yflag
            if yld is None or yld==0.0:
                yld,yflag = get_yield(year,world_code,component_production)
                ret_flag += "Y"+yflag+'w'
            #if yld is None or yld==0.0:
            #    crop_areas[rec['feedcode']] += 0.0
            #    continue
            #NOTE(review): if the world-level yield is also 0/None this divides by
            #zero (the guard above is commented out) -- confirm world yields are always set.
            crop_areas[rec['feedcode']] += feed/yld
            crop_area += feed/yld
    #print crop_areas
    return crop_area,ret_flag
def get_feed_conversion_relative(year,region_code,item_code):
    """
    Given a year, aggregate (region) code, and primary livestock item code
    this function returns the feed conversion rate (i.e. the number of kilograms
    of feed required to produce one kilogram of the primary livestock item).

    The rate is the production-weighted average of the pastoral ('P') and
    mixed+landless ('ML') system rates; each rate and the ML production
    fraction are quadratics in (year-1970) fitted to Bouwman et al. (2005).
    """
    #get conversion parameters (a,b,c are quadratic params fitting Bouwman et al. 2005)
    spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':item_code}
    fields = {'system':1,'a':1,'b':1,'c':1}
    qry,flag = find(table_feedconversionparams,spec,fields,sort=[('aggregatecode',-1)])
    yr = year-1970 #Conversion params start at 1970, but the quadratic params a,b,c are fitted to the shifted data where 1970 -> 0
    Pconv = 0.0
    MLconv = 0.0 #initialize both systems: previously MLconv was left unbound, raising UnboundLocalError when no "ML" record existed
    for rec in qry: #There are individual values for Pastoral and Mixed+Landless systems
        if rec['system']=="P":
            Pconv = rec['a']*yr*yr + rec['b']*yr + rec['c'] #feed conversion for pastoral production
        elif rec['system']=="ML":
            MLconv = rec['a']*yr*yr + rec['b']*yr + rec['c'] #feed conversion for mixed+landless production
    # Get production fractions for each system (same spec; highest-resolution aggregate sorts first)
    #spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':item_code}
    fields = {'a':1,'b':1,'c':1}
    #print "spec",spec
    qry,flag = find(table_systemproductionfractions,spec,fields,sort=[('aggregatecode',-1)])
    rec = qry.next()
    #if rec is None:
    #    rec = db.systemfractions.find_one({'aggregatecode':world_code,'itemcode':item_code},{'a':1,'b':1,'c':1})
    MLfrac = rec['a']*yr*yr + rec['b']*yr + rec['c'] #fraction of production derived from mixed+landless systems
    feed_conversion = Pconv*(1-MLfrac) + MLconv*MLfrac
    return feed_conversion
def get_pasture_areas(year,country_code,animal_code=None,flag=[],org_year=None,next_dir=0,aggregate_level=0):
    """
    Returns a dictionary of key:value pairs where key is the item code of a live animal
    and value is the area of pasture assigned to that animal in the given year and country.
    The percentage of the total pasture assigned to each animal is equal to the percentage
    of the total animal population (in livestock units) made up of that animal.

    When 'animal_code' is given, a single pre-computed record is read from
    table_pastureareas and returned as {'T':..,'ML':..,'P':..} (total,
    mixed+landless, pastoral areas).  Otherwise the full breakdown is computed
    for every ruminant meat code and returned as {"T":{..},"ML":{..},"P":{..}}
    keyed by meat item code, together with a flag string ('' on success,
    "No data" otherwise).
    'org_year' and 'next_dir' track the recursive search through neighbouring
    years when the requested year has no land data; callers normally leave
    them (and 'flag'/'aggregate_level') at their defaults.
    NOTE(review): 'flag' is a mutable default argument; it is only rebound
    here, never mutated in place, so the sharing is currently benign.
    """
    if animal_code is not None:
        #fast path: read the pre-computed per-animal breakdown from the database
        (pasture_area,flag) = ({'T':0.0,'ML':0.0,'P':0.0},'No data')
        spec = {'year':year,'countrycode':country_code,'itemcode':animal_code}#,'elementcode':sys_code}
        fields = {'elementcode':1,'value':1,'flag':1}
        qry,f = find(table_pastureareas,spec,fields)
        for rec in qry:
            if rec['elementcode']==-3010: #total
                pasture_area['T']=rec['value']
            elif rec['elementcode']==-3011: #mixed+landless
                pasture_area['ML']=rec['value']
            elif rec['elementcode']==-3012: #pastoral
                pasture_area['P']=rec['value']
            else:
                print "Invalid elementcode in pastureareas"
                raise ValueError
            flag = '' #at least one record was found
        return pasture_area,flag
    if org_year is None:
        org_year = year
    ruminant_codes = bovine_codes+ovine_codes
    ruminant_meat_codes = bovine_meat_codes+ovine_meat_codes
    pasture_areas = {k:0.0 for k in ruminant_meat_codes}
    pasture_areas_ML = {k:0.0 for k in ruminant_meat_codes}
    pasture_areas_P = {k:0.0 for k in ruminant_meat_codes}
    #if country_code in country_mappings:
    #    country_code = country_mappings[country_code]
    #    flag += "Cm"
    livestock_units = get_livestock_units(country_code)
    total_pasture_area = 0.0
    #year = year if year!=2010 else 2009 #This is a band-aid since 2010 land data is not available yet
    #get total pasture area
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':pasture_codes}}
    total_pasture_area,f = find_sum(table_land, spec, 'value')
    total_pasture_area = 1000.0*total_pasture_area #1000.0 prefactor since land area is in kHa
    if total_pasture_area == 0.0: #no data on pasture area, so assume
        #flag.append("Fr")
        spec = {'year':year,'countrycode':country_code,'itemcode':agricultural_land_code,'elementcode':area_code}
        fields = {'value':1}
        rec,f = find_one(table_land,spec,fields)
        #rec = db.land.find_one({'year':yr,'countrycode':country_code,'itemcode':agricultural_land_code,'elementcode':area_code},{'value':1})
        total_pasture_area = 1000.0*0.69*rec['value'] if rec is not None else 0.0 # 0.69 of agricultural land is pasture (world average)
    no_data = total_pasture_area==0.0
    #no land data for this year: search forward first, then backward from org_year
    if no_data and next_dir>-1 and year<max_year-1: # the -1 is a band-aid since 2010 land data is not available yet
        next_dir = 1
        #flag = list(set(flag)-set(['Fr']))#flag.translate(None,'Fr') + "Ny"
        #flag.append('Ny')
        return get_pasture_areas(year+1,country_code,animal_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and year==max_year-1 and org_year!=min_year: # the -1 is a band-aid since 2010 land data is not available yet
        next_dir = -1
        #flag = list(set(flag)-set(['Fr'])-set(['Ny']))#flag.translate(None,'Fr') + "Ny"
        #flag.append('Py')
        return get_pasture_areas(org_year-1,country_code,animal_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag = list(set(flag)-set(['Fr']))#flag.translate(None,'Fr') + "Ny"
        #flag.append('Py')
        return get_pasture_areas(year-1,country_code,animal_code,flag,org_year,next_dir,aggregate_level)
    elif no_data:
        #exhausted the year range: give up with zeroed areas
        flag = "No data"
        pasture_areas = {"T":pasture_areas,"ML":pasture_areas_ML,"P":pasture_areas_P}
        return pasture_areas,flag
    #get parts of pasture area that are in ML and P agri-systems
    #P
    region_code = get_country_region(country_code)
    yr = year-1970 #Bouwman et al. (2005) data starts at 1970, but the quadratic params a,b,c are fitted to the shifted data where 1970 -> 0
    spec = {'aggregatecode':{'$in':[region_code,world_code]},'system':'P'}
    fields = {'a':1,'b':1,'c':1}
    qry,f = find(table_systemareafractions,spec,fields,sort=[('aggregatecode',-1)])
    rec = qry.next()
    Pfrac = rec['a']*yr*yr + rec['b']*yr + rec['c'] #fraction grassland in pastoral system
    total_pasture_area_P = total_pasture_area*Pfrac
    #ML Note that Pfrac != 1-MLfrac because some grassland may be marginal
    spec = {'aggregatecode':{'$in':[region_code,world_code]},'system':'ML'}
    qry,f = find(table_systemareafractions,spec,fields,sort=[('aggregatecode',-1)])
    rec = qry.next()
    MLfrac = rec['a']*yr*yr + rec['b']*yr + rec['c'] #fraction of production derived from mixed+landless systems
    total_pasture_area_ML = total_pasture_area*MLfrac
    total_pasture_area = total_pasture_area_ML + total_pasture_area_P
    #print yr,total_pasture_area,total_pasture_area_P,total_pasture_area_ML,Pfrac,MLfrac
    total_animals = 0.0
    total_animals_P = 0.0
    total_animals_ML = 0.0
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':ruminant_codes}}
    fields = {'itemcode':1,'elementcode':1,'value':1}
    qry,f = find(table_productionlivestock,spec,fields)
    for rec in qry:
        mult = 1000.0 if rec['elementcode'] in khead_codes else 1.0 #birds and rodents expressed in 1000 heads
        num_producing_animals = 0
        #item_code = None
        if rec['itemcode'] in milkeggs_animal_mappings.values(): # separate out milk/egg producing animals
            item_code = animal_milkeggs_mappings[rec['itemcode']]
            spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementgroup':producing_animals_group}
            fields = {'value':1}
            r,f = find_one(table_productionlivestockprimary,spec,fields)
            num_producing_animals = r['value'] if r is not None else 0
            #now break these animals up into ML and P parts and convert to livestock units
            spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':item_code}
            fields = {'a':1,'b':1,'c':1}
            qry2,f2 = find(table_systemanimalfractions,spec,fields,sort=[('aggregatecode',-1)])
            rec2 = qry2.next()
            MLfrac = rec2['a']*yr*yr + rec2['b']*yr + rec2['c'] #fraction of animals from mixed+landless systems
            num_livestock_units = mult*livestock_units[rec['itemcode']]*num_producing_animals
            #num_producing_animals_ML = mult*livestock_units[rec['itemcode']]*num_producing_animals*MLfrac
            num_producing_animals_ML = num_livestock_units*MLfrac
            pasture_areas_ML[item_code] = num_producing_animals_ML
            #num_producing_animals_P = mult*livestock_units[rec['itemcode']]*num_producing_animals*(1-MLfrac)
            #num_producing_animals_P = num_livestock_units - num_producing_animals_ML
            num_producing_animals_P = num_livestock_units*(1-MLfrac)
            total_animals_ML += num_producing_animals_ML
            pasture_areas_P[item_code] = num_producing_animals_P
            total_animals_P += num_producing_animals_P
            pasture_areas[item_code] = num_producing_animals_ML + num_producing_animals_P
            total_animals += num_producing_animals_ML+num_producing_animals_P
        #the rest are meat animals
        num_meat_animals = rec['value']-num_producing_animals
        #now break these animals up into ML and P parts and convert to livestock units
        meat_code = livestock_reverse_mappings[rec['itemcode']]
        spec = {'aggregatecode':{'$in':[region_code,world_code]},'itemcode':meat_code}
        fields = {'a':1,'b':1,'c':1}
        qry2,f2 = find(table_systemanimalfractions,spec,fields,sort=[('aggregatecode',-1)])
        rec2 = qry2.next()
        MLfrac = rec2['a']*yr*yr + rec2['b']*yr + rec2['c'] #fraction of animals from mixed+landless systems
        num_livestock_units = mult*livestock_units[rec['itemcode']]*num_meat_animals
        #num_meat_animals_ML = mult*livestock_units[rec['itemcode']]*num_meat_animals*MLfrac
        num_meat_animals_ML = num_livestock_units*MLfrac
        pasture_areas_ML[meat_code] = num_meat_animals_ML
        total_animals_ML += num_meat_animals_ML
        #num_meat_animals_P = mult*livestock_units[rec['itemcode']]*num_meat_animals*(1-MLfrac)
        num_meat_animals_P = num_livestock_units - num_meat_animals_ML
        pasture_areas_P[meat_code] = num_meat_animals_P
        total_animals_P += num_meat_animals_P
        pasture_areas[meat_code] = num_meat_animals_ML + num_meat_animals_P
        total_animals += num_meat_animals_ML+num_meat_animals_P
    # Normalize: distribute each system's total pasture proportionally to livestock units
    if total_animals_P > 0:
        pasture_areas_P = {i:total_pasture_area_P*(p/total_animals_P) for i,p in pasture_areas_P.iteritems()}
    else:
        pasture_areas_P = {i:0 for i,p in pasture_areas_P.iteritems()}
    if total_animals_ML > 0:
        pasture_areas_ML = {i:total_pasture_area_ML*(p/total_animals_ML) for i,p in pasture_areas_ML.iteritems()}
    else:
        pasture_areas_ML = {i:0 for i,p in pasture_areas_ML.iteritems()}
    if total_animals > 0:
        for i in pasture_areas:
            pasture_areas[i] = pasture_areas_ML[i] + pasture_areas_P[i]
    else:
        pasture_areas = {i:0 for i,p in pasture_areas.iteritems()}
    pasture_areas = {"T":pasture_areas,"ML":pasture_areas_ML,"P":pasture_areas_P}
    #flag = ''.join(flag)
    flag = ""
    return pasture_areas,flag
def get_production_info(year,country_code,source_codes):
    """
    Return (yields, productions): for every primary item code in
    'source_codes', the yield (via fetch_yield) and the production quantity
    (via get_production) for the given year and country.  The partner->producer
    mapping in country_mappings is applied to the country code first.
    For primary livestock items the yield is production divided by the sum of
    pasture area (see get_pasture_areas) and feed crop area
    (see get_feed_crop_area).
    """
    #map partner to associated producer if applicable
    country_code = country_mappings.get(country_code,country_code)
    yields = []
    productions = []
    for code in source_codes:
        item_yield,_ = fetch_yield(year,country_code,code)
        yields.append(item_yield)
        quantity,_ = get_production(year,country_code,code)
        productions.append(quantity)
    return yields,productions
def get_livestock_units(country_code):
    """
    A livestock unit is a measure that allows comparison of different
    livestock species (e.g. 1 goat is equivalent to 0.1 North American
    cattle); the conversion factors vary by world region.  Given a country
    code, return a dict mapping live-animal item code -> conversion factor
    to livestock units for that country's region.  Used mainly to determine
    stocking rates in terms of required grazing land.
    """
    field = 'value2' #'value2' => poultry, pigs and rodents are assumed landless. Use 'value1' otherwise.
    country_code = country_mappings.get(country_code,country_code)
    region_code = get_country_region(country_code)
    qry,_ = find(table_livestockunits,{'aggregatecode':region_code},{'itemcode':1,field:1})
    return {rec['itemcode']:rec[field] for rec in qry}
def get_trade_matrix(country_code,item_code,field):
    """
    Return a cursor over trade-matrix records matching the given country code
    (against the supplied field name, e.g. 'reportercode' or 'partnercode')
    and item code, sorted by ascending year.  The flag returned by find() is
    discarded.  Returns None when the lookup raises TypeError.
    """
    spec = {field:country_code,'itemcode':item_code}
    try:
        matrix,_ = find(table_tradematrix,spec,sort=[('year',1)])
    except TypeError:
        return None
    return matrix
def get_source_tree(item_code):
    """
    Resolve a (possibly processed) traded commodity to its primary sources.
    Returns three parallel lists:
      sources     - primary item codes composing the given item,
      multipliers - factors converting processed mass to primary-equivalent
                    mass (float('inf') marks a by-product entry),
      flags       - the 'byproduct' field of each source record:
        0 -> product derived from a single primary commodity (e.g. Wheat flour, Chicken meat)
        1 -> by-product derived from a single primary commodity (e.g. Wheat bran, Chicken fat)
        2 -> product derived from multiple primary commodities (e.g. Breakfast cereals)
        3 -> by-product derived from multiple primary commodities (e.g. Tallow, Dregs)
    An item with no parent records is itself primary and maps to itself:
    ([item_code],[1.0],[0]).
    """
    sources = []
    multipliers = []
    flags = [] #"flags" here corresponds to "byproduct" in the database
    qry,_ = find(table_commoditytrees,{'itemcode':item_code})
    for rec in qry:
        sources.append(rec['parentcode'])
        flags.append(rec['byproduct'])
        if rec['byproduct'] in byproduct_codes:
            multipliers.append(float('inf'))
        else:
            multipliers.append(rec['value'])
    if not sources:
        return [item_code],[1.0],[0]
    return sources,multipliers,flags
def get_trade_quantities(year,country_code,source_codes):
    """
    For each item code in source_codes, return parallel lists
    (imports, exports) of traded quantities for the given year and country.
    Live-animal trade for livestock items is converted to primary-equivalent
    mass via the carcass weight.  China's producing code is swapped for its
    trade code when querying the trade tables.
    Note: imports/exports from this function may differ from those obtained
    by summing over the trade matrix.
    """
    fields = {'elementcode':1,'value':1}
    #China is recorded under a different code in the trade tables
    trade_cc = china_trade_code if country_code==china_producing_code else country_code
    imports = []
    exports = []
    for code in source_codes:
        imported = 0.0
        exported = 0.0
        qry,_ = find(table_tradecropslivestock,{'year':year,'countrycode':trade_cc,'itemcode':code},fields)
        for rec in qry:
            if rec['elementcode'] in import_codes:
                imported += rec['value']
            elif rec['elementcode'] in export_codes:
                exported += rec['value']
        if code in livestockprimary_codes+livestock_codes:
            #live-animal trade: convert head counts to mass via carcass weight
            animal_code = livestock_mappings[code] if code in livestockprimary_codes else code
            carcass_weight = get_carcass_weight(year,country_code,animal_code)
            qry,_ = find(table_tradeliveanimals,{'year':year,'countrycode':trade_cc,'itemcode':animal_code},fields)
            for rec in qry:
                if rec['elementcode'] in import_codes:
                    imported += carcass_weight*rec['value']
                elif rec['elementcode'] in export_codes:
                    exported += carcass_weight*rec['value']
        imports.append(imported)
        exports.append(exported)
    return imports,exports
def get_production(year,country_code,item_code,sys_code=-5511,imports=True,exports=True,cull=True,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Dispatch to the appropriate production getter for the given primary item
    code (see get_source_codes) or live animal code:
      - crops -> get_crop_production,
      - primary livestock items -> get_livestockprimary_production,
      - live animals -> mapped to their primary (meat) item first, then
        treated as primary livestock.
    Country mappings are applied automatically.  Raises ValueError for an
    unrecognised item code.
    The sys_code/flag/org_year/next_dir/aggregate_level parameters are kept
    for signature compatibility but are not used by this dispatcher.
    """
    country_code = country_mappings.get(country_code,country_code)
    if item_code in crop_codes:
        return get_crop_production(year,country_code,item_code,from_db=from_db)
    if item_code in livestockprimary_codes:
        return get_livestockprimary_production(year,country_code,item_code,imports,exports,cull,from_db=from_db)
    if item_code in livestock_codes:
        lp_code = livestock_reverse_mappings[item_code]
        return get_livestockprimary_production(year,country_code,lp_code,imports,exports,cull,from_db=from_db)
    raise ValueError
def get_carcass_weight(year, country_code, item_code, get_next=True, aggregate=True):
    """
    Given a year, country code and live animal code (item_code), return the carcass weight
    in tonnes of that animal (0.0 when no record can be found).
    -'year' is the year for which to get the carcass weight.
    -'country_code' specifies the country for which to get the carcass weight.
    -'item_code' specifies the live animal; it is mapped to its primary
      livestock (meat) code before the lookup.
    -'get_next' specifies whether to get the next available record if
      none exists for the specified year.
    -'aggregate' specifies whether to average over the country's (sub-)continent
      if none exists for the specified country.
    Bug fix: 'get_next' was previously ignored (hard-coded True in the
    find_one call); it is now forwarded to the lookup.
    """
    if country_code in country_mappings:
        country_code = country_mappings[country_code]
    lp_code = livestock_reverse_mappings[item_code]
    spec = {'year':year,'countrycode':country_code,'itemcode':lp_code,'elementcode':{'$in':carcass_codes}}
    fields = {'elementcode':1,'value':1}
    #NOTE(review): the 'aggregate' flag is still not honored here — the lookup
    #always falls back on the countrycode aggregate; confirm find_one's API
    #before wiring the parameter through.
    rec,flag = find_one(table_productionlivestockprimary,spec,fields,get_next=get_next,aggregate='countrycode')
    if rec is not None:
        #unit conversion depends on the elementcode the record reports in
        conv = Hg2tonnes if rec['elementcode'] == 5417 else dg2tonnes
        carcass_weight = conv*rec['value']
    else:
        carcass_weight = 0.0
    return carcass_weight
def get_fraction_of_ag(year,country_code,item_code,sector="total",flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Return (fraction, flag): the share of the country's agricultural land
    (for the given 'sector') harvested for the given item in the given year.
    With from_db=True a pre-computed value is read from table_agrilandfraction
    (float('inf') and "No data" when absent).
    NOTE(review): the from_db lookup does not filter on itemcode and queries
    the collection directly instead of using the find_one() wrapper used
    elsewhere — confirm both are intentional.
    org_year/next_dir/aggregate_level are accepted but unused here.
    """
    if from_db:
        spec = {'year':year,'countrycode':country_code}
        fields = {'value':1,'flag':1}
        rec = table_agrilandfraction.find_one(spec,fields)
        (y,flag) = (rec['value'],rec['flag']) if rec is not None else (float('inf'),"No data")
        return y,flag
    agri_land,al_flag = get_agricultural_area(year,country_code,sector=sector,from_db=True)
    area_harvested,ah_flag = get_area_harvested(year,country_code,item_code,sector)
    if item_code in livestockprimary_codes:
        #livestock areas come back as a dict; pick out the requested sector.
        #NOTE(review): the livestock dict is keyed "T"/"P"/"P_ML"/"P_P"/"C"
        #while 'sector' defaults to "total" — a missing key would raise
        #KeyError, which is not caught here; verify the expected sector values.
        try:
            area_harvested = area_harvested[sector] if area_harvested is not None else 0.0
        except TypeError:
            print year,country_code,item_code,area_harvested
            raise
    #print year,country_code,item_code,agri_land
    frac = area_harvested/float(agri_land) if area_harvested is not None and agri_land!=0.0 else 0.0
    flag = ''
    return frac,flag
def get_agricultural_area(year,country_code,sector="total",flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Return (area, flag): agricultural land area in Ha for the given year,
    country and sector ("total" -> all agricultural land, "crop" -> cropland).
    With from_db=True a pre-computed value is read from table_agriland /
    table_cropland (0.0 and "No data" when absent).  Otherwise table_land is
    queried, falling back first to a later year, then to an earlier year.
    Raw land values are in kHa, hence the 1000.0 prefactor.
    NOTE(review): both fallback lookups sort ascending by year, so the
    "earlier year" fallback returns the EARLIEST available record rather than
    the nearest preceding one — confirm whether that is intended.
    org_year/next_dir/aggregate_level are accepted but unused here.
    """
    if from_db:
        if sector=="total":
            table = table_agriland
        elif sector=="crop":
            table = table_cropland
        spec = {'year':year,'countrycode':country_code}
        fields = {'value':1,'flag':1}
        rec = table.find_one(spec,fields)
        (y,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return y,flag
    if sector=="total":
        item_code = agricultural_land_code
    elif sector=="crop":
        item_code = {'$in':cropland_codes}
    if org_year is None:
        org_year = year
    fields = {'value':1}
    spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':area_code}
    qry,f = find(table_land, spec, fields)
    y=0.0
    for rec in qry:
        y += 1000.0*rec['value'] if rec is not None else 0.0
    no_harvest = y==0.0
    if no_harvest:
        #fallback 1: first record from a later year
        spec = {'year':{'$gt':year},'countrycode':country_code,'itemcode':item_code,'elementcode':area_code}
        rec,f = find_one(table_land, spec, fields, sort=[('year',1)])
        y = 1000.0*rec['value'] if rec is not None else 0.0
        #flag = "Ny"
        no_harvest = y==0.0
        if no_harvest:
            #fallback 2: earliest record from a previous year (see NOTE above)
            spec = {'year':{'$lt':year},'countrycode':country_code,'itemcode':item_code,'elementcode':area_code}
            rec,f = find_one(table_land, spec, fields, sort=[('year',1)])
            y = 1000.0*rec['value'] if rec is not None else 0.0
            #flag = "Py"
            no_harvest = y==0.0
            if no_harvest:
                y = 0.0
                flag = ["No data"]
    #print flag
    flag = ''.join(flag)
    return y,flag
def get_crop_yield(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Return (yield, flag) in tonnes/Ha for the given crop, year and country.
    With from_db=True the pre-computed value is read from table_cropyields
    (float('inf') and "No data" when absent).  Otherwise table_productioncrops
    is searched, falling back in order to: the next later year, the earliest
    earlier year, the country's (sub-)continent, and finally the world
    aggregate.  The computed path always returns flag ''; a completely failed
    search yields 0.0.
    """
    if from_db:
        rec,_ = find_one(table_cropyields,
                         {'year':year,'countrycode':country_code,'itemcode':item_code},
                         {'value':1,'flag':1})
        if rec is None:
            return float('inf'),"No data"
        return rec['value'],rec['flag']
    if org_year is None:
        org_year = year
    fields = {'value':1}
    def _lookup(cc,year_spec,sort=None):
        #one fallback step: fetch a yield record and convert Hg/Ha -> tonnes/Ha
        spec = {'year':year_spec,'countrycode':cc,'itemcode':item_code,'elementcode':yield_code}
        if sort is None:
            rec,_ = find_one(table_productioncrops, spec, fields)
        else:
            rec,_ = find_one(table_productioncrops, spec, fields, sort=sort)
        return Hg2tonnes*rec['value'] if rec is not None else None
    y = _lookup(country_code,year)
    if y is None: #next available later year
        y = _lookup(country_code,{'$gt':year},sort=[('year',1)])
    if y is None: #earliest earlier year
        y = _lookup(country_code,{'$lt':year},sort=[('year',1)])
    if y is None: #(sub-)continental aggregate
        y = _lookup(get_country_region(country_code,1),{'$lt':year})
    if y is None: #world aggregate
        y = _lookup(world_code,{'$lt':year})
    return (y if y is not None else 0.0),''
def get_crop_area_harvested(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given year, country code, and crop item code, return (area harvested, flag).
    With from_db=True a pre-computed value is read directly from
    table_cropareaharvested (0.0 and "No data" when absent).  Otherwise
    table_productioncrops is searched, falling back first to the next later
    year and then to the earliest earlier year; the computed path always
    returns flag '' and a failed search yields 0.0.
    To do : create the db table
    """
    if from_db:
        spec = {'year':year,'countrycode':country_code,'itemcode':item_code}
        fields = {'value':1,'flag':1}
        #NOTE(review): direct collection access, unlike the find_one() wrapper used elsewhere
        rec = table_cropareaharvested.find_one(spec,fields)
        (a,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return a,flag
    if org_year is None:
        org_year = year
    fields = {'value':1}
    spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':area_harvested_code}
    rec,f = find_one(table_productioncrops, spec, fields)
    a = rec['value'] if rec is not None else None
    no_harvest = a is None
    if no_harvest:
        #fallback 1: next available later year
        spec = {'year':{'$gt':year},'countrycode':country_code,'itemcode':item_code,'elementcode':area_harvested_code}
        rec,f = find_one(table_productioncrops, spec, fields, sort=[('year',1)])
        a = rec['value'] if rec is not None else None
        #flag = "Ny"
        no_harvest = a is None
        if no_harvest:
            #fallback 2: earliest earlier year (ascending sort)
            spec = {'year':{'$lt':year},'countrycode':country_code,'itemcode':item_code,'elementcode':area_harvested_code}
            rec,f = find_one(table_productioncrops, spec, fields, sort=[('year',1)])
            a = rec['value'] if rec is not None else None
            #flag = "Py"
            no_harvest = a is None
            if no_harvest:
                a = 0.0
                flag = "No data"
    #flag = ''.join(flag)
    flag = ''
    return a,flag
def get_crop_production(year,country_code,item_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Return (production, flag) for the given crop, year and country.
    With from_db=True a pre-computed value is read directly from
    table_cropproduction (float('inf') and "No data" when absent).  Otherwise
    table_productioncrops is searched, falling back first to the next later
    year and then to the earliest earlier year.  The computed path always
    returns flag ''; a failed search yields 0.0.
    """
    if from_db:
        rec = table_cropproduction.find_one({'year':year,'countrycode':country_code,'itemcode':item_code},
                                            {'value':1,'flag':1})
        if rec is None:
            return float('inf'),"No data"
        return rec['value'],rec['flag']
    if org_year is None:
        org_year = year
    fields = {'value':1}
    def _lookup(year_spec,sort=None):
        #one fallback step against the raw production table
        spec = {'year':year_spec,'countrycode':country_code,'itemcode':item_code,'elementcode':production_code}
        if sort is None:
            rec,_ = find_one(table_productioncrops, spec, fields)
        else:
            rec,_ = find_one(table_productioncrops, spec, fields, sort=sort)
        return rec['value'] if rec is not None else None
    p = _lookup(year)
    if p is None: #next available later year
        p = _lookup({'$gt':year},sort=[('year',1)])
    if p is None: #earliest earlier year
        p = _lookup({'$lt':year},sort=[('year',1)])
    return (p if p is not None else 0.0),''
def get_feed_conversion(year,country_code,lp_code,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=False):
    """
    Given year, country code and primary livestock item code, return the feed
    conversion (i.e. (feed)/(livestock product)) as a dict keyed by feed crop
    code plus a "total" entry, together with a flag string.
    With from_db=True a pre-computed scalar is read from table_feedconversion
    (0.0 and "No data" when absent).
    Only mixed/landless production is assumed to consume feed.  When either
    feed or production data is missing, the search recurses forward through
    later years, then backward from org_year, and finally climbs the
    aggregate hierarchy toward the world code before giving up with "No data".
    """
    if from_db:
        spec = {'year':year,'countrycode':country_code,'itemcode':lp_code}
        fields = {'value':1,'flag':1}
        rec,f = find_one(table_feedconversion,spec,fields)
        (fc,flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return fc,flag
    if org_year is None:
        org_year = year
    flag = ''
    feed_quantities,fq_flag = get_feed_quantities(year,country_code,lp_code,domestic=False)
    #if fq_flag!='':
    #    flag += "Fq"+fq_flag+"Fq"
    feed_quantity = sum(feed_quantities.values())
    ##meat_production,mp_flag = get_livestockprimary_production(year,country_code)
    ##meat_production = meat_production['ML'][item_code]
    meat_production,mp_flag = get_livestockprimary_production(year,country_code,lp_code)#,sys_code=-5512)
    meat_production = meat_production['ML'] #only production in mixed/landless system uses feed.
    #if mp_flag!='':
    #    flag += "Mp"+mp_flag+"Mp"
    no_data = feed_quantity==0 or meat_production==0
    if no_data and next_dir>-1 and year<max_year: #get next
        next_dir = 1
        #flag.append("Ny")
        return get_feed_conversion(year+1,country_code,lp_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and year==max_year and org_year!=min_year:
        next_dir = -1
        #flag = list(set(flag)-set(['Ny']))#flag.translate(None,'Ny')+"Py"
        #flag.append('Py')
        return get_feed_conversion(org_year-1,country_code,lp_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append('Py')
        return get_feed_conversion(year-1,country_code,lp_code,flag,org_year,next_dir,aggregate_level)
    elif no_data and country_code!=world_code:
        #climb one level up the aggregate hierarchy and restart from org_year
        #flag = list(set(flag)-set(['Py'])-set(['Ny']))#flag.translate(None,'Py').translate(None,'Ny')
        aggregate_level+=1
        region_code = get_country_region(country_code,aggregate_level)
        #flag.extend(['A',str(region_code)])#'A'+str(region_code)
        return get_feed_conversion(org_year,region_code,lp_code,flag,org_year,next_dir,aggregate_level)
    elif no_data:
        #nothing found anywhere: return zeros for every feed crop
        feed_conversion = {k:0.0 for k,v in feed_quantities.iteritems()}
        feed_conversion["total"] = 0.0
        return feed_conversion,"No data"
    feed_conversion = {k:v/meat_production for k,v in feed_quantities.iteritems()}
    feed_conversion["total"] = feed_quantity/meat_production
    flag = ''
    return feed_conversion,flag
def get_livestockprimary_area_harvested(year,country_code,item_code,sector="total",flag=[],org_year=None,next_dir=0,aggregate_level=0,get_next=False,from_db=False):
    """
    Given year, country code, and primary livestock item code, return
    (areas, flag) where areas is a dict with keys:
      "T"    total area (pasture + feed-crop area),
      "P"    pasture area,
      "P_ML" pasture area in mixed/landless systems,
      "P_P"  pasture area in pastoral systems,
      "C"    crop area used to grow feed.
    With from_db=True the pre-computed breakdown is read from
    table_livestockareaharvested (elementcodes -5313..-5317).
    Otherwise pasture areas come from get_pasture_areas and crop area from
    feed quantities divided by the corresponding crop yields; with
    get_next=True, missing years are searched forward and then backward.
    NOTE(review): the live from_db mapping (-5314 -> 'C', -5315 -> 'P')
    contradicts the commented-out block below, which maps -5315 to crop and
    -5314 to pasture — verify which mapping the table actually uses.
    'sector' only affects the recursive calls; the full breakdown is always
    returned.
    """
    if from_db:
        (lpah,lpa_flag) = ({"T":0.0,"P":0.0,"P_ML":0.0,"P_P":0.0,"C":0.0},"No data")
        spec = {'year':year,'countrycode':country_code,'itemcode':item_code}#,'elementcode':sys_code}
        fields = {'elementcode':1,'value':1,'flag':1}
        qry,f = find(table_livestockareaharvested,spec,fields)
        for rec in qry:
            if rec['elementcode']==-5313:
                lpah['T']=rec['value']
            elif rec['elementcode']==-5314:
                lpah['C']=rec['value']
            elif rec['elementcode']==-5315:
                lpah['P']=rec['value']
            elif rec['elementcode']==-5316:
                lpah['P_ML']=rec['value']
            elif rec['elementcode']==-5317:
                lpah['P_P']=rec['value']
            else:
                print "Invalid elementcode in livestockareaharvested"
                raise ValueError
            lpa_flag = '' #at least one record was found
        return lpah,lpa_flag
    """if from_db:
        if sector == "total":
            sys_code = -5313
        elif sector == "crop":
            sys_code = -5315
        elif sector == "pasture":
            sys_code = -5314
        spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':sys_code}
        fields = {'value':1,'flag':1}
        rec,f = find_one(table_livestockareaharvested,spec,fields)
        (lpa_production,lpa_flag) = (rec['value'],rec['flag']) if rec is not None else (0.0,"No data")
        return lpa_production,lpa_flag
    """
    if org_year is None:
        org_year = year
    #if item_code in meat_animal_mappings.keys():
    #    animal_code = meat_animal_mappings[item_code]
    #elif item_code in milkeggs_animal_mappings.keys():
    #    animal_code = item_code
    #else:
    #    print "Itemcode",item_code,"is not a valid code"
    #    raise ValueError
    pasture_area = 0.0
    pasture_area_ML = 0.0
    pasture_area_P = 0.0
    if item_code in items_that_use_pasture:
        (pa, pa_flag) = get_pasture_areas(year,country_code,item_code)
        pasture_area = pa['T'] #total
        pasture_area_ML = pa['ML'] #mixed/landless
        pasture_area_P = pa['P'] #pastoral
    #print pasture_area,animal_code
    #if item_code in items_that_use_pasture:
    #    spec = {'year':year,'countrycode':country_code,'itemcode':animal_code}
    #    fields = {'value':1}
    #    rec,f = find_one(table_pastureareas,spec,fields)
    #    pasture_area = rec['value']
    #pa_flag = rec['flag']
    #print pa_flag
    #if pa_flag!='':
    #    flag.extend(["Pa",pa_flag,"Pa"])
    #print pasture_area,animal_code
    feed_quantities,fq_flag = get_feed_quantities(year,country_code,item_code)
    #if fq_flag!='':
    #    flag.extend(["Fq",fq_flag,"Fq"])
    crop_area = 0.0
    #for crop_code,quantity in feed_quantities.iteritems():
    #    yld,y_flag = get_crop_yield(year,country_code,crop_code,from_db=True)
    #    print quantity,yld
    #if y_flag!='':
    #    flag.extend(["Y",str(crop_code),y_flag,"Y"])
    #    crop_area += quantity/yld if yld!=0 else 0.0
    #Get crop yields in one shot. This is faster than using get_crop_yield
    spec = {'year':year,'countrycode':country_code,'itemcode':{'$in':feed_quantities.keys()}}
    fields = {'itemcode':1,'value':1}
    qry,f = find(table_cropyields,spec,fields)
    for rec in qry:
        yld = rec['value']
        quantity = feed_quantities[rec['itemcode']]
        crop_area += quantity/yld if yld!=0 else 0.0 #skip zero yields to avoid dividing by zero
    no_harvest = (pasture_area+crop_area)==0
    #optional search for a year with data: forward first, then backward from org_year
    if get_next and no_harvest and next_dir>-1 and year<max_year-1: #the -1 is because land areas aren't available yet for 2010
        next_dir = 1
        #flag.append("Ny")
        return get_livestockprimary_area_harvested(year+1,country_code,item_code,sector,flag,org_year,next_dir,aggregate_level,get_next)
    elif get_next and no_harvest and year==max_year-1 and org_year!=min_year: #the -1 is because land areas aren't available yet for 2010
        next_dir = -1
        #flag = list(set(flag)-set(['Ny']))#flag.translate(None,'Ny')+"Py"
        #flag.append('Py')
        return get_livestockprimary_area_harvested(org_year-1,country_code,item_code,sector,flag,org_year,next_dir,aggregate_level,get_next)
    elif get_next and no_harvest and next_dir < 0 and year>min_year:
        next_dir = -1
        #flag.append("Py")
        return get_livestockprimary_area_harvested(year-1,country_code,item_code,sector,flag,org_year,next_dir,aggregate_level,get_next)
    elif no_harvest:
        flag = ["No data"]
        a = None
    areas_harvested = {"T":pasture_area+crop_area,"P":pasture_area,"P_ML":pasture_area_ML,"P_P":pasture_area_P,"C":crop_area}
    flag = ''.join(flag)
    #flag = '' #flag routine is buggered if get_next=True
    return areas_harvested,flag
def get_import_export(year,country_code,item_code,flag=[]):
"""
Get quantities of item_code imported and exported by the given country in the given year.
Note: This function does not yet work on live animal codes.
"""
(imports,exports) = (0.0,0.0)
spec = {'year':year,'countrycode':country_code,'itemcode':item_code,'elementcode':{'$in':import_codes+export_codes}}
fields = {'elementcode':1,'value':1}
qry,f = find(table_tradecropslivestock,spec,fields)
for rec in qry:
if rec['elementcode'] in import_codes:
imports = rec['value']
elif rec['elementcode'] in export_codes:
exports = rec['value']
else:
print rec['elementcode']
raise ValueError
return imports,exports
def get_area_harvested(year,country_code,item_code,sector="total",flag=[],org_year=None,next_dir=0,aggregate_level=0,get_next=False,from_db=False):
    """
    Get the land area harvested for the given primary item code (see get_source_codes) or live animal code.
    For primary livestock items, the area is given by the sum of the pasture area associated with the livestock animal
    (see get_pasture_areas) and the crop area associated with feed (see get_feed_crop_area).
    For live animals (livestock), the item code is mapped to the corresponding animal
    carcass code (e.g. Cattle (866) -> Cattle meat (867), Chicken (1057) -> Chicken meat (1058)...)
    then treated as a primary livestock (meat) item.
    -'year' is the year for which to get the area.
    -'country_code' specifies the country for which to get the area.
    -'item_code' specifies the primary item whose harvested area is to be calculated.
    -'sector' and 'from_db' are forwarded to the per-category helpers; the
      remaining parameters are accepted for interface compatibility only.
    Note: It may be necessary to get country mappings for the country_code before calling this function.
    Note: Return value is in units of Ha.
    Raises ValueError for item codes that are neither crops, primary livestock, nor live animals.
    """
    if item_code in crop_codes:
        return get_crop_area_harvested(year,country_code,item_code,from_db=from_db)
    if item_code in livestockprimary_codes:
        return get_livestockprimary_area_harvested(year,country_code,item_code,sector,from_db=from_db)
    if item_code in livestock_codes:
        # Map the live animal to its primary (carcass) item, then treat it as such.
        item_code = livestock_reverse_mappings[item_code]
        return get_livestockprimary_area_harvested(year,country_code,item_code,sector,from_db=from_db)
    raise ValueError("unknown item code for get_area_harvested(): %s" % item_code)
def fetch_yield(year,country_code,item_code,pasture_as_feed=True):
    """
    Look up a precomputed yield for the given primary item / live animal /
    compound feed code.  The backing table is produced by iterating
    get_yield() over all included item codes, so this is the fast path.
    Returns a (value, flag) pair, or (0.0, "no data") when no record exists.
    Note: It may be necessary to get country mappings for the country_code
    before calling this function.
    """
    query = {'year': year, 'countrycode': country_code,
             'itemcode': item_code, 'elementcode': yield_code}
    projection = {'value': 1, 'flag': 1}
    record, _ = find_one(table_yields, query, projection)
    if record is None:
        return 0.0, "no data"
    return record['value'], record['flag']
def get_yield(year,country_code,item_code,sector="total",imports=True,exports=True,cull=False,flag=[],org_year=None,next_dir=0,aggregate_level=0,from_db=True):
    """
    Get the yield for the given primary item code (see get_source_codes) or live animal code.
    For primary livestock items, the yield is calculated as the production
    divided by the sum of the pasture area associated with the livestock animal
    (see get_pasture_areas) and the crop area associated with feed (see get_feed_crop_area).
    For live animals (livestock), the item code is mapped to the corresponding animal
    carcass code (e.g. Cattle (866) -> Cattle meat (867), Chicken (1057) -> Chicken meat (1058)...)
    then treated as a primary livestock (meat) item.
    -'year' is the year for which to get the yield.
    -'country_code' specifies the country for which to get the yield.
    -'item_code' specifies the primary item whose yield is to be calculated.
    -'imports'/'exports'/'cull'/'from_db' are forwarded to the per-category
      helpers; the remaining parameters are accepted for compatibility only.
    Note: Country mappings are automatically applied.
    Raises ValueError for item codes that are neither crops, primary
    livestock, nor live animals.
    """
    # Normalize special country codes (e.g. China mainland -> China).
    if country_code in country_mappings:
        country_code = country_mappings[country_code]
    if item_code in crop_codes:
        return get_crop_yield(year,country_code,item_code,from_db=from_db)
    if item_code in livestockprimary_codes:
        return get_livestockprimary_yield(year,country_code,item_code,imports=imports,exports=exports,cull=cull,from_db=from_db)
    if item_code in livestock_codes:
        # Map the live animal to its primary (carcass) item, then treat it as such.
        item_code = livestock_reverse_mappings[item_code]
        return get_livestockprimary_yield(year,country_code,item_code,imports=imports,exports=exports,cull=cull,from_db=from_db)
    raise ValueError("Invalid item code %s for get_yield()" % item_code)
def get_all_countries(struct = 'list'):
    """
    Return every known country, as a list or dictionary depending on
    'struct' (see get_countries for the accepted values).
    """
    return get_countries(db.countries, struct)
def get_producing_countries(struct = 'list'):
    """
    Return the countries listed as producers, as a list or dictionary
    depending on 'struct' (see get_countries for the accepted values).
    """
    return get_countries(db.producers, struct)
def get_trade_reporter_countries(struct = 'list'):
    """
    Return the countries that report trade, as a list or dictionary
    depending on 'struct' (see get_countries for the accepted values).
    """
    return get_countries(db.reporters, struct)
def get_trade_partner_countries(struct = 'list'):
    """
    Return the countries that appear as trade partners, as a list or
    dictionary depending on 'struct' (see get_countries).
    """
    return get_countries(db.partners, struct)
def get_balancing_countries(struct = 'list'):
    """
    Return the countries that report commodity balances, as a list or
    dictionary depending on 'struct' (see get_countries).
    """
    return get_countries(db.balancers, struct)
def get_countries(collection,struct):
    """
    Convert the records of a country collection into the requested shape:
    - 'list'  -> list of countrycodes
    - 'list2' -> list of country names
    - 'dict'  -> {countrycode: country}
    - 'dict2' -> {country: countrycode}
    Raises ValueError for any other value of 'struct'.
    """
    builders = {
        'list':  lambda recs: [r['countrycode'] for r in recs],
        'list2': lambda recs: [r['country'] for r in recs],
        'dict':  lambda recs: {r['countrycode']: r['country'] for r in recs},
        'dict2': lambda recs: {r['country']: r['countrycode'] for r in recs},
    }
    if struct not in builders:
        raise ValueError
    return builders[struct](collection.find())
def get_country_region(country_code,level=1):
    """
    Fetch the sub-continent or continent for the given country code.
    Returns the aggregate (region) code corresponding to the smallest
    geographical unit (i.e. largest aggregate code).
    level can be 1 (sub-continent) or 2 (continent) or 3 (world).
    NOTE(review): any level other than 2 or 3 behaves exactly like level 1.
    Raises StopIteration (after printing the code) when no aggregate exists.
    """
    # World level: every country belongs to the single world aggregate.
    if level==3:
        return world_code
    # Normalize special country codes (e.g. China mainland -> China).
    if country_code in country_mappings:
        country_code = country_mappings[country_code]
    # Codes >= world_code (5000) are already aggregate (region) codes.
    if country_code >= world_code:
        return country_code
    # Aggregates sorted descending, so the first record is the smallest
    # geographical unit (sub-continent); codes >= 5600 are excluded.
    # NOTE: qry.next() is the PyMongo 2.x cursor API (next(qry) in PyMongo 3+).
    qry = db.countryaggregates.find({'aggregatecode':{'$lt':5600},'countrycode':country_code},{'aggregatecode':1},sort=[('aggregatecode',-1)])
    if level==2:
        # Skip the sub-continent record so the next one is the continent.
        qry.next()
    try:
        region_code = qry.next()['aggregatecode']
    except StopIteration:
        # No aggregate found for this country: log the offending code.
        # (Python 2 print statement -- this module is Python 2 throughout.)
        print country_code
        raise StopIteration
    #aggregate_code = db.countryaggregates.find_one({'aggregatecode':{'$lt':5600},'countrycode':country_code},{'aggregatecode':1},sort=[('aggregatecode',-1)])['aggregatecode'] if country_code < world_code else country_code #country codes >= 5000 are already aggregate codes
    return region_code
def get_country_mappings():
    """
    Return a dictionary mapping special country codes (usually trade
    partners that aren't also producers) to their associated countries or
    regions, e.g. China, mainland (41) -> China (351).
    """
    records = db.countrymappings.find()
    return {record['fromcode']: record['tocode'] for record in records}
def get_crop_codes():
    """Return the item codes of every crop listed as produced."""
    return [record['itemcode'] for record in db.cropsproduced.find()]
def get_livestockprimary_codes():
    """Return the item codes of every primary livestock commodity produced."""
    return [record['itemcode'] for record in db.livestockprimaryproduced.find()]
def get_livestock_codes():
    """Return the item codes of every live animal listed as produced."""
    return [record['itemcode'] for record in db.liveanimalsproduced.find()]
def get_livestock_mappings():
    """
    Return a dictionary whose keys are primary livestock item codes and
    whose values are the corresponding live animal codes.
    """
    records = db.livestockmappings.find()
    return {record['fromcode']: record['tocode'] for record in records}
def find(collection, spec, fields=None, sort=None, aggregate=None, flag=''):
    """
    Fetch cursor to lfaodb records.
    - 'collection' is the mongo collection to query (e.g. db.productioncrops)
    - 'spec' is a dictionary of conditions to match against (e.g. {'year':2001, 'countrycode':231, 'itemcode':56})
    - 'fields' is a dictionary of fields to fetch (e.g. {'elementcode':1,'value':1})
    - 'sort' is a list of tuples (field, order) used to sort the query (e.g. [('year',1)]). order is 1=ascending or -1=descending)
    - 'aggregate' is the key corresponding to the "country"code over which to aggregate
      if the cursor comes up empty. Typically one of 'countrycode', 'reportercode', or 'partnercode'
      If aggregate is not None and the initial query comes up empty, the db is re-queried on the
      specified country's (sub-)continent.
    Returns a (cursor, flag) pair; flag gains an 'a' for every aggregation
    step taken.  An empty cursor is returned when no fallback applies.
    """
    # PyMongo 2.x projection API ('fields'); the cursor is evaluated lazily.
    qry = collection.find(spec,fields=fields,sort=sort)
    try:
        # Probe the cursor once to learn whether the query matched anything,
        # then rewind so the caller still sees every record.
        qry.next()
        qry.rewind()
    except StopIteration:
        #qry = None
        if aggregate is not None and spec[aggregate]<world_code: #5000 is where geo-aggregate codes begin.
            region_code = get_country_region(spec[aggregate])
            # NOTE(review): this mutates the caller's spec dict in place, and
            # the fallback logic in find_one() appears to rely on observing
            # that mutation -- audit callers before making a defensive copy.
            spec[aggregate] = region_code
            flag=flag+'a'
            return find(collection,spec,fields,sort,None,flag)
    return qry,flag
def find_one(collection, spec, fields=None, sort=[], get_next=False, aggregate=None, flag=''):
    """
    Fetch one lfaodb record.
    - 'collection' is the mongo collection to query (e.g. db.productioncrops)
    - 'spec' is a dictionary of conditions to match against (e.g. {'year':2001, 'countrycode':231, 'itemcode':56})
    - 'fields' is a dictionary of fields to fetch (e.g. {'elementcode':1,'value':1})
    - 'sort' is a list of tuples (field, order) used to sort the query (e.g. [('year',1)]). order is 1=ascending or -1=descending)
    - 'get_next': if True and no record is found, the nearest later year is
      tried first, then the nearest earlier year.
    - 'aggregate' is the field corresponding to the "country"code over which
      to aggregate when the year fallback also fails; the fallback escalates
      (sub-)continent, then continent, then world.  Only reachable when
      get_next is True (unchanged from the original behaviour).
    Returns (record, flag): record is None if nothing is ultimately found;
    flag accumulates 'n' (next year), 'p' (previous year) and 'a'
    (aggregated) markers.
    Bug fixes vs. the previous version: the recursive fallback calls used to
    assign the returned (record, flag) *tuple* to 'rec', so the 'rec is None'
    checks never fired and callers received nested tuples; and a record
    already found via the year fallback could be clobbered by aggregation.
    """
    rec = collection.find_one(spec,fields=fields,sort=sort)
    if rec is None and get_next:
        try:
            # Look forward in time first...
            spec['year'] = {'$gt':spec['year']}
            sort = sort + [('year', 1)]
            qry,f = find(collection, spec, fields, sort, aggregate)
            rec = qry.next()
            flag = flag+f+'n'
        except StopIteration:
            try:
                # ...then backward from the original year.
                spec['year'] = {'$lt':spec['year']['$gt']}
                sort = sort + [('year', -1)]
                qry,f = find(collection, spec, fields, sort, aggregate)
                rec = qry.next()
                flag = flag+f+'p'
            except StopIteration:
                rec = None
        # Only aggregate when the year fallback found nothing (the previous
        # version ran this block unconditionally, discarding found records).
        if rec is None and aggregate is not None and spec[aggregate]<world_code: #5000 is where geo-aggregate codes begin.
            region_code = get_country_region(spec[aggregate],0)
            spec[aggregate] = region_code
            flag = flag+'a'
            # NOTE(review): spec['year'] is already mutated to a range query
            # here, so the recursive get_next path may build odd queries --
            # preserved from the original; confirm before changing.
            rec, flag = find_one(collection,spec,fields,sort,get_next,None,flag)
            if rec is None:
                region_code = get_country_region(spec[aggregate],1)
                spec[aggregate] = region_code
                flag = flag+'a'
                rec, flag = find_one(collection,spec,fields,sort,get_next,None,flag)
            if rec is None:
                spec[aggregate] = world_code
                flag = flag+'a'
                rec, flag = find_one(collection,spec,fields,sort,get_next,None,flag)
    return rec,flag
# An older, fully commented-out draft of find_one() (together with its
# orphaned module-level docstring, which was evaluated as a no-op string
# expression) used to live here.  It duplicated the implementation above and
# has been removed; consult version control history if it is ever needed.
def find_sum(collection,spec,field,get_next=False,reverse=False,group='year',flag=''):
    """
    Sum over the fields of a query.
    - 'collection' is the collection to query on.
    - 'spec' is a dictionary of conditions to match.
    - 'field' the field to sum over (e.g. 'value')
    - 'get_next' specifies if you want to fetch the next available record if
      no match is found (i.e. next available according to group).
    - 'reverse': If True then reverse the sense of get_next.
    - 'group' is a field to group by (e.g. 'year')
    Returns (sum, flag); sum is 0 if no match is found, and flag gains an
    'n' when the get_next fallback was taken.
    Bug fix vs. the previous version: the fallback now only fires when the
    initial query matched nothing (it used to run unconditionally whenever
    get_next was set, discarding a perfectly good result), and the caller's
    spec dict is no longer mutated.
    """
    # PyMongo 2.x aggregate API: results come back under the 'result' key.
    result = collection.aggregate([{'$match':spec},{'$group':{'_id':'$'+group,'total':{'$sum':'$'+field}}}])['result']
    result = sorted(result,key=lambda k: k['_id'],reverse=reverse)
    if not result:
        if get_next:
            # Query the remaining groups in the requested direction; the
            # sorted result's first entry is then the nearest group.
            direction = '$lt' if reverse else '$gt'
            spec = dict(spec)  # don't clobber the caller's spec
            spec[group] = {direction: spec[group]}
            return find_sum(collection, spec, field, False, reverse, group, flag+'n')
        return 0, flag
    return result[0]['total'], flag
#Database connection
connection = Connection()
db = connection.lfaodb
#Database collection objects
table_productioncrops = db.productioncrops
table_productioncropsprocessed = db.productioncropsprocessed
table_productionlivestock = db.productionlivestock
table_productionlivestockprimary = db.productionlivestockprimary
table_livestockproductionnoadj = db.livestockproductionnoadj
table_livestockproductionexport = db.livestockproductionexport
table_livestockproductionimportexport = db.livestockproductionimportexport
table_livestockproductionimportexportcull = db.livestockproductionimportexportcull
table_liveanimalproduction = db.liveanimalproduction
table_productionlivestockprocessed = db.productionlivestockprocessed
table_tradecropslivestock = db.tradecropslivestock
table_tradeliveanimals = db.tradeliveanimals
table_tradematrix = db.tradematrix
table_commoditybalance = db.commoditybalance
table_foodbalance = db.foodbalancesheets
table_cropsproduced = db.cropsproduced
table_livestockproduced = db.livestockproduced
table_livestockprimaryproduced = db.livestockprimaryproduced
table_livestockareaharvested = db.livestockareaharvested
table_livestockyields = db.livestockyieldsimportexport
table_countries = db.countries
table_countrymappings = db.countrymappings
table_producers = db.producers
table_reporters = db.reporters
table_partners = db.partners
table_balancers = db.balancers
table_livestockmappings = db.livestockmappings
table_livestockunits = db.livestockunits
table_cullrates = db.cullrates
table_commoditytrees = db.commoditytrees
table_feedtodomesticratio = db.feedtodomestic
table_feedmixes = db.feedmixes
table_feedfoodfractions = db.feedfoodfractions
table_feedshares = db.feedshares
table_feedconversion = db.feedconversion
table_feedssr = db.feedssr
table_ssr = db.ssr
table_feedconversionparams = db.feedconversionparams
table_systemproductionfractions = db.systemproductionfractions
table_systemanimalfractions = db.systemanimalfractions
table_systemslaughterfractions = db.systemslaughterfractions
table_systemareafractions = db.systemareafractions
table_land = db.land
table_agriland = db.agriland
table_cropland = db.cropland
table_agrilandfraction = db.agrilandfraction
table_pastureareas = db.pastureareas
table_population = db.population
table_cropyields = db.cropyields
table_cropproduction= db.cropproduction
table_cropareaharvested = db.cropareaharvested
#Other constants
min_year = 1961 # first year of the time series
max_year = 2010 # last year (land areas are not yet available for 2010 -- see uses of max_year-1 above)
export_group = 91
export_codes = [5910,5909,5908,5907]
export_code = 5910 # this one is for quantitiy in tonnes
# NOTE(review): the next line immediately rebinds export_code, so 5910 is
# never usable.  By parallel with import_code / import_code_fb below this was
# most likely meant to be "export_code_fb = 5911"; confirm every downstream
# user of export_code before renaming.
export_code = 5911 # this one is for quantitiy in 1000 tonnes (foodbalancesheets)
import_group = 61
import_codes = [5610,5609,5608,5607]
import_code = 5610 # this one is for quantitiy in tonnes
import_code_fb = 5611 # this one is for quantitiy in 1000 tonnes (foodbalancesheets)
yield_code = 5419
production_code = 5510
production_code_fb = 5511
carcass_codes = [5417,5424]
Hg2tonnes = 0.0001 # hectograms -> tonnes
dg2tonnes = 0.0000001 # decigrams -> tonnes
byproduct_codes = [1,3]
balance_reporters = get_balancing_countries()
cereal_code_balance = 2905 #in commoditybalance table
cereal_code_production = 1717 #in production table
feed_code = 5520 #feed element in commoditybalance table
feed_code_fb = 5521 #feed element in commoditybalance table
food_supply_code_fb = 664 #food supply (kcal/capita) element in foodbalancesheets table
food_code = 5141 #food element in commoditybalance table
food_code_fb = 5142 #food element in foodbalance table
production_code_balance = 5511 #production element in commoditybalance table
import_code_balance = 5611 #import element in commoditybalance table
domestic_supply_code = 5300
domestic_supply_code_fb = 5301
population_item_code = 3010
population_element_code = 511
agricultural_land_code = 6610
cropland_codes = [6650,6621]
pasture_codes = [6655, 6633] #temporary and permanent pastures
area_code = 5110 #or is it 5312???
area_harvested_code = 5312
#milk_codes = [951,882,1020,982,1130,1062,1091]#,987 #fresh milk and eggs codes in productionlivestockprimary
#milk_animal_codes = [946,866,1016,976,1126,1057,1083]#,976 #milk-or-egg-producing animal codes in productionlivestock
#milk_animal_number_codes = [5318,5313] #code for the number of animals producing milk
cattle_codes = [867]
livestock_mappings = get_livestock_mappings()
livestock_reverse_mappings = {y:x for x,y in livestock_mappings.iteritems()}
feed_categories = { #keys are food groups, first tuple entry is list of included itemcodes from commiditybalance, second tuple entry is list of corresponding itemcodes from productioncrops, third tuple entry (if present) is conversion factor to primary crop.
"cereal":([2511,2804,2513,2514,2515,2516,2517,2518,2520],
[15,27,44,56,71,75,79,83,108],[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]),
"roots":([2531,2532,2533,2534,2535],[116,125,122,149,137],[1.0,1.0,1.0,1.0,1.0]),
"sugarcrops":([2536,2537],[156,157],[1.0,1.0]),
"sugar":([2827],[156],[0.11]),
"pulses":([2546,2547,2549],[176,187,191],[1.0,1.0,1.0]),
"nuts":([2551],[1729],[1.0]),
"oilcrops":([2555,2820,2557,2558,2559,2560,2561,2563,2570],[236,242,267,270,328,249,289,260,339],[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]),
"oil":([2571,2572,2573,2574,2575,2576,2578,2579,2586],[236,242,267,270,328,254,249,289,339],[0.18,0.30,0.41,0.38,0.10,0.19,0.13,0.43,0.3]),
"fruitnveg":([2601,2602,2605,2611,2612,2613,2614,2615,2616,2618,2619,2620,2625],[388,403,358,490,497,507,512,486,489,574,577,560,619],[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0])}
feed_items_in_balance = feed_categories["cereal"][0]+feed_categories["roots"][0]+feed_categories["sugarcrops"][0]+feed_categories["sugar"][0]+feed_categories["pulses"][0]+feed_categories["nuts"][0]+feed_categories["oilcrops"][0]+feed_categories["oil"][0]+feed_categories["fruitnveg"][0]
feed_items_in_production = feed_categories["cereal"][1]+feed_categories["roots"][1]+feed_categories["sugarcrops"][1]+feed_categories["sugar"][1]+feed_categories["pulses"][1]+feed_categories["nuts"][1]+feed_categories["oilcrops"][1]+feed_categories["oil"][1]+feed_categories["fruitnveg"][1]
feed_items_conversions = feed_categories["cereal"][2]+feed_categories["roots"][2]+feed_categories["sugarcrops"][2]+feed_categories["sugar"][2]+feed_categories["pulses"][2]+feed_categories["nuts"][2]+feed_categories["oilcrops"][2]+feed_categories["oil"][2]+feed_categories["fruitnveg"][2]
feed_balance_production_mappings = {v[0]:v[1] for v in zip(feed_items_in_balance,feed_items_in_production)}
feed_production_balance_mappings = {v[1]:v[0] for v in zip(feed_items_in_balance,feed_items_in_production)}
bovine_meat_codes = [867,947,1097,1108,1124,1127]#[867,947,977,1017,1097,1108,1124,1127]
bovine_codes = [866,946,1096,1107,1110,1126]
ovine_meat_codes = [977,1017]
ovine_codes = [976,1016]
milk_codes = [882,951,982,1020,1130]
pig_meat_codes = [1035]
pig_codes = [1034]
poultry_meat_codes = [1058,1069,1073,1080,1089]
poultry_codes = [1057,1068,1072,1079,1083]
egg_codes = [1062,1091]
meat_animal_mappings = {867:866,947:946,1097:1096,1108:1107,1124:1110,1127:1126,977:976,1017:1016,1035:1034,1058:1057,1069:1068,1073:1072,1080:1079,1089:1083}
meat_codes = meat_animal_mappings.keys()
animal_meat_mappings = {v:k for k,v in meat_animal_mappings.iteritems()}
milkeggs_meat_mappings = {882:867,951:947,982:977,1020:1017,1062:1058,1130:1127,1091:1069}
meat_milkeggs_mappings = {867:882,947:951,977:982,1017:1020,1058:1062,1127:1130,1069:1091}
milkeggs_animal_mappings = {882:866,951:946,982:976,1020:1016,1062:1057,1130:1126,1091:1068}
milkeggsmeat_animal_mappings = dict(milkeggs_animal_mappings.items()+meat_animal_mappings.items())
animal_milkeggs_mappings = {v:k for k,v in milkeggs_animal_mappings.iteritems()}
producing_animals_group = 31
producing_animals_codes = [5320,5322,5318,5321,5313,5323,5319,5314]
khead_codes = [5321,5313,5323]
milking_codes = [5318]
laying_codes = [5313]
processed_codes = [5130]
items_that_use_pasture = bovine_meat_codes+ovine_meat_codes+milk_codes
#the following is for items in cropproduction that aren't in tradecropslivestock
trade_to_production_mappings = {27:(38,0.637),254:(258,0.0276),277:(278,0.1),305:(306,0.15),310:(311,0.66),328:(331,0.1),542:(515,1.0),674:(677,1.0)}
fodder_to_crop_mappings = {637:83,638:71,644:358,645:394,648:426}
#feedcode_mappings_production = {0:[1717], 1:[638,639,640,641,642,643], 2:[1720,1726,1732], 3:[1735], 4:[1726,1732]}
#feedcode_mappings_balance = {0:[2905],1:[638,639,640,641,642,643], 2:[2907,2913,2911], 3:[2918], 4:[2913,2911]}
cmpndfeed_mappings = {840:867,841:1058,842:1035,845:1058}
#landless_animal_codes = [1034,1057,1183,1089,1069,1163,1073,1062,1067,1182,1084,1094,1070,1077,1055,1144,1154,1087,1151,1167,1091,1092,1083,1141,1185,1195,1176,999,1080]
#get the countrymappings
region_codes = {5000:"World",5101:"Eastern Africa",5102:"Middle Africa",5103:"Northern Africa",5104:"Southern Africa",5105:"Western Africa",5203:"Northern America",5204:"Central America",5206:"Carribbean",5207:"South America",5301:"Central Asia",5302:"Eastern Asia",5303:"Southern Asia",5304:"South-Eastern Asia",5305:"Western Asia",5401:"Eastern Europe",5402:"Northern Europe",5403:"Southern Europe",5404:"Western Europe",5501:"Australia and New Zealand",5502:"Melanesia",5503:"Micronesia",5504:"Polynesia",5100:"Africa",5200:"Americas",5300:"Asia",5400:"Europe",5500:"Oceania",5706:"European Union"}#,5600:"Antarctic Region"
continent_codes = {5100:"Africa",5200:"Americas",5300:"Asia",5400:"Europe",5500:"Oceania",}
world_code = 5000
china_producing_code = 351
china_trade_code = 357
country_mappings = get_country_mappings()
crop_codes = get_crop_codes()
livestockprimary_codes = get_livestockprimary_codes()
livestock_codes = get_livestock_codes()
primary2commodity_mappings = {
515:2617,
486:2615,
44:2513,
176:2546,
125:2532,
89:2520,
92:2520,
94:2520,
97:2520,
101:2520,
103:2520,
108:2520,
512:2614,
698:2642,
661:2633,
252:2578,
249:2560,
656:2630,
331:2575,
577:2619,
521:2625,
523:2625,
526:2625,
530:2625,
531:2625,
534:2625,
536:2625,
541:2625,
542:2625,
544:2625,
547:2625,
549:2625,
550:2625,
552:2625,
554:2625,
558:2625,
567:2625,
568:2625,
569:2625,
571:2625,
572:2625,
587:2625,
591:2625,
592:2625,
600:2625,
603:2625,
619:2625,
507:2613,
560:2620,
244:2572,
242:2556,
497:2612,
56:2514,
60:2582,
79:2517,
216:2551,
217:2551,
220:2551,
221:2551,
222:2551,
223:2551,
224:2551,
225:2551,
226:2551,
75:2516,
#excluded 2586
263:2570,
265:2570,
275:2570,
277:2570,
280:2570,
296:2570,
299:2570,
305:2570,
310:2570,
311:2570,
312:2570,
333:2570,
336:2570,
339:2570,
261:2580,
260:2563,
403:2602,
490:2611,
257:2577,
258:2576,
187:2547,
687:2640,
689:2641,
574:2618,
489:2616,
116:2531,
181:2549,
191:2549,
195:2549,
197:2549,
201:2549,
203:2549,
205:2549,
210:2549,
211:2549,
271:2574,
293:2574,
27:2805,
36:2581,
135:2534,
136:2534,
149:2534,
71:2515,
289:2561,
290:2579,
83:2518,
237:2571,
236:2555,
692:2645,
693:2645,
702:2645,
711:2645,
720:2645,
723:2645,
158:2542,
159:2542,
157:2537,
156:2536,
163:2541,
267:2557,
268:2573,
122:2533,
#Sweeteners, Other excluded
667:2635,
388:2601,
358:2605,
366:2605,
367:2605,
372:2605,
373:2605,
378:2605,
393:2605,
394:2605,
397:2605,
399:2605,
401:2605,
402:2605,
406:2605,
407:2605,
414:2605,
417:2605,
420:2605,
423:2605,
426:2605,
430:2605,
447:2605,
449:2605,
459:2605,
461:2605,
463:2605,
567:2605,
568:2605,
15:2511,
564:2644,
137:2535,
867:2731,
#Butter, Ghee excluded
1062:2744,
1182:2745,
1089:2735,
1097:2735,
1108:2735,
1124:2735,
1111:2735,
1127:2735,
1141:2735,
1151:2735,
1158:2735,
1163:2735,
882:2848,
951:2848,
1020:2848,
1089:2848,
1130:2848,
982:2848,
977:2732,
1035:2733,
1058:2734,
1069:2734,
1073:2734,
1080:2734,
}
| [
"chris.pagnutti@gmail.com"
] | chris.pagnutti@gmail.com |
74a24c2a8ac44625130847009d45e50976a7bebd | 8ef96160197ccd382328a54893c3f07d53db3774 | /setup.py | 9e2d39086985a67a4106c37876c5aafdb8ca20cd | [] | no_license | derekdreery/py3status | c74bd6980d2b1d2517788d169b1f7a64ab26bcf6 | cecf61d7cc8cc2a056de699e1d2216c20ad486ec | refs/heads/master | 2020-12-03T09:21:01.623187 | 2015-10-11T14:55:57 | 2015-10-11T14:55:57 | 44,056,390 | 0 | 1 | null | 2015-10-11T14:57:07 | 2015-10-11T14:57:06 | null | UTF-8 | Python | false | false | 1,513 | py | """
py3status
"""
import os
from setuptools import find_packages, setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Used to pull README.rst into long_description below.  The file is opened
    with a context manager so the handle is closed promptly (the previous
    version leaked it and triggered ResourceWarning on CPython 3).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; see https://github.com/ultrabug/py3status for the project.
setup(
    name='py3status',
    version='2.6',
    author='Ultrabug',
    author_email='ultrabug@ultrabug.net',
    description='py3status is an extensible i3status wrapper written in python',
    # Long description is pulled straight from README.rst (see read() above).
    long_description=read('README.rst'),
    url='https://github.com/ultrabug/py3status',
    download_url='https://github.com/ultrabug/py3status/tags',
    license='BSD',
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    # No runtime dependencies beyond the standard library.
    install_requires=[],
    # Installs a `py3status` console command dispatching to py3status.main.
    entry_points={
        'console_scripts': [
            'py3status = py3status:main',
        ]
    },
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| [
"ultrabug@gentoo.org"
] | ultrabug@gentoo.org |
7dc4983c30c707604da22c0935d8a156f400a0e1 | 8e4887e07aec84ef82271dd501d84f150df5b790 | /code/generate_GOP_PA.py | c498432d53f31216da8c3d9f9c5cbd9c57ca1c56 | [] | no_license | teddyterminal/gerrychain-proposal-analysis | 25a8be0927287a10d29f14dd97022c2ed7358629 | 0e39dbc1716252d6e7ac9e4a2c75dcca942df55c | refs/heads/master | 2020-06-22T20:26:22.573966 | 2019-08-26T18:40:44 | 2019-08-26T18:40:44 | 198,390,623 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,189 | py | import gerrychain
import functools
import numpy as np
import pandas as pd
import tqdm
import scipy.stats as ss
import sklearn as skl
from gerrychain import Graph, Partition, Election, GeographicPartition
from gerrychain.updaters import Tally, cut_edges
from gerrychain import MarkovChain
from gerrychain.constraints import contiguous
from gerrychain.proposals import *
from gerrychain.accept import always_accept
from gerrychain.proposals import recom
from functools import partial
from gerrychain.metrics import mean_median
from gerrychain.metrics import partisan_bias
from gerrychain.metrics import partisan_gini
from gerrychain.metrics import efficiency_gap
from gerrychain.metrics import polsby_popper
from gerrychain.metrics import wasted_votes
from multiprocessing import Pool
import random
m = 9
def pp(plan):
    """
    Return the mean Polsby-Popper compactness score over all districts
    of the given plan (polsby_popper yields one score per district).
    """
    scores = polsby_popper(plan)
    return sum(scores.values()) / len(scores)
def republican_constraint(partition):
    """
    Chain validity constraint: reject any plan whose Republican seat count in
    the SEN12 election drops below the best seen so far, and ratchet the
    module-level threshold `m` upward otherwise.

    Bug fix: the previous version assigned `m` without declaring it global,
    which made `m` a local variable and raised UnboundLocalError on the read
    at the top -- the ratchet could never work.  (Note: with multiprocessing,
    each worker process ratchets its own copy of `m`.)
    """
    global m
    rep_wins = partition["SEN12"].wins("Rep")
    if rep_wins < m:
        return False
    m = rep_wins
    return True
def chain(iterations):
    """
    Run one ReCom Markov chain of `iterations` recorded steps (plus a
    100-step burn-in) on the PA VTD dual graph, collecting partisan metrics
    and per-node boundary indicators for every post-burn-in plan.
    Returns (metrics, boundary_nodes, boundary_weighted).
    Designed to be fanned out over worker processes via Pool.map (below).
    """
    # Random id used only to tag this worker's progress printouts.
    idef = random.randint(1, 10000)
    graph = Graph.from_json("./PA_VTD.json")
    election = Election("SEN12", {"Dem": "USS12D", "Rep": "USS12R"})
    # Seed the chain from the "2011_PLA_1" assignment column (presumably the
    # 2011 enacted PA plan -- TODO confirm against the shapefile docs).
    initial_partition = GeographicPartition(
        graph,
        assignment="2011_PLA_1",
        updaters={
            "cut_edges": cut_edges,
            "population": Tally("TOT_POP", alias="population"),
            "SEN12": election
        }
    )
    ideal_population = sum(initial_partition["population"].values()) / len(initial_partition)
    # We use functools.partial to bind the extra parameters (pop_col, pop_target, epsilon, node_repeats)
    # of the recom proposal.
    proposal = partial(recom,
                    pop_col="TOT_POP",
                    pop_target=ideal_population,
                    epsilon=0.02,
                    node_repeats=2
                    )
    # NOTE(review): contiguity is passed as the *accept* function and the
    # GOP-seat ratchet as the validity constraint; gerrychain examples
    # usually do the opposite (contiguous as constraint, always_accept as
    # accept) -- confirm this ordering is intended.
    chain = MarkovChain(
        proposal=proposal,
        constraints=[republican_constraint],
        accept=contiguous,
        initial_state=initial_partition,
        total_steps=iterations + 100
    )
    count = 0
    metrics = []
    boundary_nodes = []
    boundary_weighted = []
    for partition in chain.with_progress_bar():
        mm = mean_median(partition["SEN12"])
        p = pp(partition)
        bias = partisan_bias(partition["SEN12"])
        gini = partisan_gini(partition["SEN12"])
        gap = efficiency_gap(partition["SEN12"])
        cut = len(partition["cut_edges"])
        # Discard the first 100 plans as burn-in.
        if count >= 100:
            metrics.append((mm, p, bias, gini, gap, cut))
            # One slot per graph node; assumes the PA VTD graph has exactly
            # 8921 nodes indexed 0..8920 -- TODO confirm against PA_VTD.json.
            nodes = [0]*8921
            bnodes = [0]*8921
            for edge in partition["cut_edges"]:
                nodes[edge[0]] = 1
                nodes[edge[1]] = 1
                bnodes[edge[0]] += 1
                bnodes[edge[1]] += 1
            boundary_nodes.append(nodes)
            boundary_weighted.append(bnodes)
        # Periodic progress report, once per 100 steps.
        if count % 100 == 0:
            print(idef, count, mm, p, bias, gini, gap, cut, partition["SEN12"].wins("Rep"))
        count += 1
    return metrics, boundary_nodes, boundary_weighted
# Fan the work out over 24 worker processes, each running an equal share of
# the N total chain steps.  Use integer division: in Python 3, N / 24 is a
# float, and MarkovChain's total_steps / the loop bookkeeping expect ints.
pool = Pool(processes = 24)
N = 51000
steps_per_worker = N // 24
results = pool.map(chain, [steps_per_worker] * 24)
metrics = []
boundary_nodes = []
boundary_weighted = []
print("Compiling Data........")
for i in range(24):
    metrics.extend(results[i][0])
    boundary_nodes.extend(results[i][1])
    boundary_weighted.extend(results[i][2])
    print("Process " + str(i+1) + "/24.... DONE")
print("Writing Metrics........")
df = pd.DataFrame(metrics)
df.columns = ["Mean-Median", "Polsby-Popper", "Bias", "Gini", "Gap", "Cuts"]
df.to_csv("PA_GOP_50000_20190721")
print("Writing Boundary Nodes........")
df2 = pd.DataFrame(boundary_nodes)
df2.to_csv("PA_GOPBN_50000_20190721")
print("Writing Boundary Weighted........")
df3 = pd.DataFrame(boundary_weighted)
df3.to_csv("PA_GOPBW_50000_20190721")
| [
"rishabh@Rishabhs-MacBook-Pro.local"
] | rishabh@Rishabhs-MacBook-Pro.local |
2420bb9e1eda999e3964c83002eca57165520b1f | d741ba9357b0399ed66191a58a068afc9915692d | /pos_tagging/src/read_file.py | 65d122f901d8a2084f336b67d3ab4a4c4c5fcc9d | [] | no_license | lefreire/info_retrieval | 4975961bc47ec54183b21d2c3e8f7999c787d40e | a61c6db4ac963c6ba25d7fec267b07af78a107ca | refs/heads/master | 2022-12-09T01:16:39.783151 | 2020-09-01T01:12:18 | 2020-09-01T01:12:18 | 283,301,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | import logging
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
class ReadWriteFile:
def __init__(self):
"""
Constructor
"""
logging.basicConfig(level=logging.INFO)
def read_file(self, file_path):
"""
Reading file in file_path
Parameters
----------
file_path: string
Returns
-------
sentences: list of string
"""
logging.info('Lendo arquivo de {0}'.format(file_path))
file_with_tags = open(file_path, "r", encoding='utf-8')
return file_with_tags.readlines()
def split_corpus_tags(self, corpus):
"""
Reading file in file_path
Parameters
----------
file_path: string
Returns
-------
sentences: list of string
"""
logging.info('Dividindo texto das tags')
sentences = []
tags = []
dict_tags = {}
for sentence in corpus:
sentence_tmp = sentence.replace("\n", '')
words_tmp = []
tags_tmp = []
words = sentence_tmp.split(" ")
for word in words:
tag_word = word.split("_")
if tag_word[0] == "": pass
else:
words_tmp.append(tag_word[0])
tags_tmp.append(tag_word[1])
if not tag_word[1] in dict_tags.keys():
dict_tags[tag_word[1]] = {}
dict_tags[tag_word[1]]['right'] = 0
dict_tags[tag_word[1]]['pred'] = 0
dict_tags[tag_word[1]]['pres'] = 1
else: dict_tags[tag_word[1]]['pres'] += 1
sentences.append(words_tmp)
tags.append(tags_tmp)
return sentences, tags, dict_tags
def divide_train_test(self, sentences, tags):
"""
Splitting sentences and tags in train and test
Parameters
----------
sentences: list of lists
tags: list of lists
Returns
-------
train: list with indexes
test: list with indexes
"""
logging.info('Dividindo dataset em 10 folds')
kf = KFold(n_splits=10)
train, test = [], []
for train_index, test_index in kf.split(sentences):
train.append(train_index)
test.append(test_index)
return train, test
def write_file(self, file_path, acc, dict_tags):
"""
Writing file with accuracy and informations about the tags
Parameters
----------
file_path: string
acc: list with floats
dict_tags: dictionary
Returns
-------
file: file in file_path
"""
logging.info('Escrevendo arquivo em {0}'.format(file_path))
file_write = open(file_path, "w")
file_write.write("Taxa de acerto geral: {0:.2f}%\n".format(np.mean(acc)*100))
for key in dict_tags.keys():
if dict_tags[key]['right'] > 0:
file_write.write("Taxas de acerto para a classe '{0}': {1:.2f}% Total da classe '{0}': {2:.2f}%\n".format(key,
(dict_tags[key]['pred']/dict_tags[key]['right'])*100,
(dict_tags[key]['right']/dict_tags[key]['pres'])*100))
else:
file_write.write("Taxas de acerto para a classe '{0}': Nao presente no corpus de teste\n".format(key))
file_write.close()
def read_and_split(self, file_path):
"""
Main method
"""
corpus = self.read_file(file_path)
sentences, tags, dict_tags = self.split_corpus_tags(corpus)
train, test = self.divide_train_test(sentences, tags)
sentences_train, sentences_test, tags_train, tags_test = [], [], [], []
for train_index, test_index in zip(train, test):
sentences_train.append(np.array(sentences)[train_index])
sentences_test.append(np.array(sentences)[test_index])
tags_train.append(np.array(tags)[train_index])
tags_test.append(np.array(tags)[test_index])
return dict_tags, sentences_train, tags_train, sentences_test, tags_test
| [
"letfreirefigueiredo@gmail.com"
] | letfreirefigueiredo@gmail.com |
87a8e58275794c7a3195b537a5cc332b338d0ded | dffb1f09530317095aa8e3cacca2b0133602172a | /utils/dist.py | a72fb30a849d250ba92ae2a2a0c6afd406e3b46e | [
"Apache-2.0"
] | permissive | apeabody/gdistcc | dc5d6241d5c1e680760e357f1c372d5e951174d0 | ae51cd1aa1195a6a7e7c04dfed747bb39e092d1f | refs/heads/master | 2021-04-15T03:35:59.989247 | 2016-06-23T00:47:31 | 2016-06-23T00:47:31 | 60,889,387 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | #!/usr/bin/env python
# dist.py - Identify Linux Distro
#
# Copyright 2016 Andrew Peabody. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
print("Distro: " + platform.linux_distribution()[0]);
print("Version: " + platform.linux_distribution()[1]);
| [
"apeabody@gmail.com"
] | apeabody@gmail.com |
3241e36296a797bb32de7bade4cf408a1666a17b | f307e8aa577655ae90a2f73208330f63e1182965 | /other_ideas.py | f1e725637746d3046c66e1498647d779fb9ce108 | [] | no_license | mumu/cube_finder | a4812c68cc2d52957904f9d631d6c15673c92a3a | f3ef51426a41b87583e871404d6af083b8aef3cb | refs/heads/master | 2020-12-10T22:45:17.051136 | 2016-12-05T06:13:46 | 2016-12-05T06:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,159 | py | import cv2, sys, time, random
import numpy as np
import math
from sklearn.cluster import MiniBatchKMeans
from skimage.segmentation import slic
from skimage import io
# http://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def calc_intersection(p1, p2):
# we only want perpendicular intersections
diff = abs(p1[1] - p2[1])
thresh = math.pi/32
if diff > math.pi/2 + thresh or diff < math.pi/2 - thresh:
return [[float("inf")], [float("inf")]]
A = np.array([[math.cos(p1[1]), math.sin(p1[1])],
[math.cos(p2[1]), math.sin(p2[1])]])
# check this otherwise we get a numpy error since its a matrix with 2 duplicate columns
if A[0][0] == A[1][0] and A[0][1] == A[1][1]:
return [[float("inf")], [float("inf")]]
b = np.array([[p1[0]], [p2[0]]])
# Solve AX = b with X = A^-1b
A_inv = np.linalg.inv(A)
X = np.dot(A_inv, b)
# X = [[x], [y]], reshape to [x, y]
return X.ravel()
def find_clusters(points):
dist_thresh = 30
clusters = []
for p in points:
found_cluster = False
for i in range(len(clusters)):
center = clusters[i][0]
if math.hypot(center[0] - p[0], center[1] - p[1]) < dist_thresh:
clusters[i][1].append(p)
clusters[i][0] = np.mean(clusters[i][1], axis=0)
found_cluster = True
break
if not found_cluster:
clusters.append([p, [p]])
return np.array(clusters)[:, 0]
def find_all_intersections(img):
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray.png', gray)
#edges = cv2.Canny(gray, 0, 100)
edges = auto_canny(img)
cv2.imwrite('edges.png', edges)
rows,cols,channels = img.shape
lines = cv2.HoughLines(edges,1,np.pi/180,40)
for line in lines:
rho,theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),1)
cv2.imwrite('found_lines.png', img)
intersections = []
for i in range(len(lines)):
for j in range(i+1, len(lines)):
intersect = calc_intersection(lines[i][0], lines[j][0])
if intersect[0] >= 0 and intersect[0] <= cols and intersect[1] >= 0\
and intersect[1] <= rows:
intersections.append(intersect)
for intersect in intersections:
cv2.circle(img,(int(intersect[0]), int(intersect[1])),3,255,-1)
#clusterer = AffinityPropagation(damping=0.95)
#clusterer.fit(intersections)
#centers = clusterer.cluster_centers_
centers = find_clusters(intersections)
for center in centers:
cv2.circle(img, (int(center[0]), int(center[1])), 5, (0, 255, 0), -1)
return img
#def find_edges(img):
#blur = cv2.blur(img, (3,3))
#rows,cols,channels = blur.shape
#imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#edges = cv2.Canny(blur,100,200)
# edges = auto_canny(img)
# cv2.imwrite('edges.png', edges)
#kernel = np.ones((5,5),np.uint8)
#edges = cv2.dilate(edges, kernel)
"""
minLineLength = 100
maxLineGap = 10
lines = cv2.HoughLinesP(edges,1,np.pi/180,30,minLineLength,maxLineGap)
for line in lines:
#for x1,y1,x2,y2 in lines:
x1,y1,x2,y2=line[0]
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)"""
# return edges
def find_corners(img):
#blur = cv2.blur(img, (3,3))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
fast = cv2.FastFeatureDetector_create()
# find and draw the keypoints
kp = fast.detect(img,None)
img2 = cv2.drawKeypoints(img, kp, color=(255,0,0), outImage=img)
# shi-tomasi corner dectection
"""corners = cv2.goodFeaturesToTrack(gray,25,0.01,10)
corners = np.int0(corners)
for i in corners:
x,y = i.ravel()
cv2.circle(img,(x,y),3,255,-1)
"""
# harris corner detection
"""gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
img[dst>0.001*dst.max()]=[0,0,255]"""
# return img2
def find_squares(img):
yellow = [(15,30), (125,145), (140,170)]
rows,cols,shape = img.shape
new_img = np.zeros((rows, cols)) * 255
for i in range(cols):
for j in range(rows):
pixel = img[j][i]
if pixel[0] > yellow[0][0] and pixel[0] < yellow[0][1] and pixel[1] > yellow[1][0] \
and pixel[1] < yellow[1][1] and pixel[2] > yellow[2][0] and pixel[2] < yellow[2][1]:
new_img[j][i] = 255
return new_img
def segment_img(img):
#from skimage.data import astronaut
#img = astronaut()
rows,cols,channels = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_small = cv2.resize(img, (cols/10, rows/10))
#clusterer = MiniBatchKMeans(n_clusters = 40)
#pixels = []
#for i in range(cols):
# for j in range(rows):
# pixels.append([i, j])
#clusters = clusterer.fit(gray)
#centers = clusters.cluster_centers_
#cv2.imwrite('clusters.png', centers)
#cv2.imwrite('small_img.png', img_small)
segments = slic(img_small, n_segments=60, compactness=10)
io.imshow(segments)
io.show()
return segments
def find_lines(img):
rows,cols,channels = img.shape
img_small = cv2.resize(img, (cols/5, rows/5), interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
laplac = cv2.Laplacian(gray,cv2.CV_8U)
laplac = cv2.normalize(laplac, laplac, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
#adapt_thresh = cv2.adaptiveThreshold(laplac,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
# cv2.THRESH_BINARY,5,2)
#adapt_thresh = auto_canny(laplac)
ret,adapt_thresh = cv2.threshold(laplac,20,255,cv2.THRESH_BINARY)
#adapt_thresh = cv2.erode(adapt_thresh, np.ones((2,2)))
#adapt_thresh = cv2.morphologyEx(adapt_thresh, cv2.MORPH_CLOSE, np.ones((2,2)), iterations=4)
#edges = auto_canny(img_small)
#edges = cv2.Canny(cv2.blur(img, (3,3)), 0, 50)
#edges = cv2.resize(edges, (cols,rows))
#edges_dilated = cv2.dilate(edges, np.ones((5,5)), iterations=1)
#edges_opened = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, np.ones((5,5)))
"""
minLineLength = 20
maxLineGap = 10
lines = cv2.HoughLinesP(edges_dilated, 1, np.pi/180, 5, minLineLength, maxLineGap)
for line in lines:
x1,y1,x2,y2 = line[0]
cv2.line(img_small, (x1, y1), (x2, y2), (0, 255, 0), 2)
"""
lines = cv2.HoughLines(adapt_thresh,1,np.pi/180,200)
if lines is not None:
for line in lines:
rho,theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))*5
y1 = int(y0 + 1000*(a))*5
x2 = int(x0 - 1000*(-b))*5
y2 = int(y0 - 1000*(a))*5
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),1)
return img, adapt_thresh, laplac
def lbp(image):
rows,cols,channels = image.shape
small_img = cv2.resize(image, (cols/5,rows/5), interpolation=cv2.INTER_AREA)
num_points = 10
radius = 3
gray = cv2.cvtColor(small_img, cv2.COLOR_BGR2GRAY)
lbp = feature.local_binary_pattern(gray, num_points,
radius, method="uniform")
lbp = cv2.normalize(lbp, lbp, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
#ret, thresh = cv2.threshold(lbp, 127,255,cv2.THRESH_BINARY)
thresh = cv2.adaptiveThreshold(lbp,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
laplacian = cv2.Laplacian(gray,cv2.CV_64F)
laplacian = cv2.normalize(laplacian, laplacian, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
#ret,thresh = cv2.threshold(laplacian, 150,255, cv2.THRESH_BINARY)
laplacianx64f = cv2.Laplacian(gray,cv2.CV_64F)
abs_laplacian64f = np.absolute(laplacianx64f)
laplacian_8u = np.uint8(abs_laplacian64f)
ret,thresh = cv2.threshold(laplacian_8u, 20,255, cv2.THRESH_BINARY)
eroded = cv2.erode(thresh, np.ones((2,2)))
#thresh = cv2.adaptiveThreshold(laplacian_8u,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
# cv2.THRESH_BINARY,11,2)
lines = cv2.HoughLines(eroded,1,np.pi/180,25)
if lines is not None:
for line in lines:
rho,theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(small_img,(x1,y1),(x2,y2),(255,0,0),1)
return small_img
if __name__ == '__main__':
img = cv2.imread(sys.argv[1])
#start = time.time()
#edges = find_edges(img)
#cv2.imwrite('edges.png', edges)
#end = time.time()
#print 'took', end - start
#corners = find_corners(img)
#cv2.imwrite('cube_edges.png', edges)
#cv2.imwrite('cube_corers.png', corners)
#squares = find_squares(img)
#cv2.imwrite('squares.png', squares)
seg = segment_img(img)
cv2.imwrite('segmented.png', seg)
"""
rows,cols,channels = img.shape
img_small = cv2.resize(img, (cols/10, rows/10))
points = find_all_intersections(img_small)
cv2.imwrite('intersections.png', points)
"""
| [
"ascott@hmc.edu"
] | ascott@hmc.edu |
50f5aad9b3a42dfea3d6b7b361068fcad4be6839 | 192b7e49e60ea04e97208ab4062ac7af904acd44 | /flask/app/main.py | 2fcae5d2e943e6812412af27df15cc9df0c93de8 | [] | no_license | seakmengc/cnn-image-classification | 1e42f9e482c868908c0393001199f2cdb203e338 | bea89b18447665401b0fddc369f3bc5bd7ee87d9 | refs/heads/main | 2023-08-01T05:16:38.455856 | 2021-09-24T16:11:04 | 2021-09-24T16:11:04 | 401,121,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | from flask import Flask, request, jsonify
from torch_utils import transform_image, get_prediction
app = Flask(__name__)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def allowed_file(filename):
# xxx.png
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/predict', methods=['POST'])
def predict():
# 1) Load image
# 2) image -> tensor
# 3) predict
if request.method == 'POST':
file = request.files.get('file')
if file is None or file.filename == "":
return jsonify({'error': 'no file'})
if not allowed_file(file.filename):
return jsonify({'error': 'format not supported'})
# try:
img_bytes = file.read()
tensor = transform_image(img_bytes)
prediction = get_prediction(tensor)
print('prediction', prediction)
data = {'prediction': str(prediction), 'class_name': str(prediction)}
return jsonify(data)
# except:
# return jsonify({'error': 'error during prediction'})
if __name__ == '__main__':
app.run(debug=True)
| [
"c.seakmeng0603@gmail.com"
] | c.seakmeng0603@gmail.com |
2b60f88f7128b020f21fa8e9351b9fb82c26385d | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/corpora/lowcorpus.py | e293c998a14d288506947a9fd241acf64a343952 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 7,100 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus in GibbsLda++ format of List-Of-Words.
"""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six import iterkeys
from six.moves import xrange, zip as izip
logger = logging.getLogger('gensim.corpora.lowcorpus')
def split_on_space(s):
return [word for word in utils.to_unicode(s).strip().split(' ') if word]
class LowCorpus(IndexedCorpus):
"""
List_Of_Words corpus handles input in GibbsLda++ format.
Quoting http://gibbslda.sourceforge.net/#3.2_Input_Data_Format::
Both data for training/estimating the model and new data (i.e., previously
unseen data) have the same format as follows:
[M]
[document1]
[document2]
...
[documentM]
in which the first line is the total number for documents [M]. Each line
after that is one document. [documenti] is the ith document of the dataset
that consists of a list of Ni words/terms.
[documenti] = [wordi1] [wordi2] ... [wordiNi]
in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated
by the blank character.
"""
def __init__(self, fname, id2word=None, line2words=split_on_space):
"""
Initialize the corpus from a file.
`id2word` and `line2words` are optional parameters.
If provided, `id2word` is a dictionary mapping between word_ids (integers)
and words (strings). If not provided, the mapping is constructed from
the documents.
`line2words` is a function which converts lines into tokens. Defaults to
simple splitting on spaces.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.line2words = line2words # how to translate lines into words (simply split on space by default)
self.num_docs = self._calculate_num_docs()
if not id2word:
# build a list of all word types in the corpus (distinct words)
logger.info("extracting vocabulary from the corpus")
all_terms = set()
self.use_wordids = False # return documents as (word, wordCount) 2-tuples
for doc in self:
all_terms.update(word for word, wordCnt in doc)
all_terms = sorted(all_terms) # sort the list of all words; rank in that list = word's integer id
# build a mapping of word id(int) -> word (string)
self.id2word = dict(izip(xrange(len(all_terms)), all_terms))
else:
logger.info("using provided word mapping (%i ids)", len(id2word))
self.id2word = id2word
self.num_terms = len(self.word2id)
self.use_wordids = True # return documents as (wordIndex, wordCount) 2-tuples
logger.info(
"loaded corpus with %i documents and %i terms from %s",
self.num_docs, self.num_terms, fname
)
def _calculate_num_docs(self):
# the first line in input data is the number of documents (integer). throws exception on bad input.
with utils.smart_open(self.fname) as fin:
try:
result = int(next(fin))
except StopIteration:
result = 0
return result
def __len__(self):
return self.num_docs
def line2doc(self, line):
words = self.line2words(line)
if self.use_wordids:
# get all distinct terms in this document, ignore unknown words
uniq_words = set(words).intersection(iterkeys(self.word2id))
# the following creates a unique list of words *in the same order*
# as they were in the input. when iterating over the documents,
# the (word, count) pairs will appear in the same order as they
# were in the input (bar duplicates), which looks better.
# if this was not needed, we might as well have used useWords = set(words)
use_words, marker = [], set()
for word in words:
if (word in uniq_words) and (word not in marker):
use_words.append(word)
marker.add(word)
# construct a list of (wordIndex, wordFrequency) 2-tuples
doc = [(self.word2id.get(w), words.count(w)) for w in use_words]
else:
uniq_words = set(words)
# construct a list of (word, wordFrequency) 2-tuples
doc = [(w, words.count(w)) for w in uniq_words]
# return the document, then forget it and move on to the next one
# note that this way, only one doc is stored in memory at a time, not the whole corpus
return doc
def __iter__(self):
"""
Iterate over the corpus, returning one bag-of-words vector at a time.
"""
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
if lineno > 0: # ignore the first line = number of documents
yield self.line2doc(line)
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save a corpus in the List-of-words format.
This function is automatically called by `LowCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
logger.info("storing corpus in List-Of-Words format into %s" % fname)
truncated = 0
offsets = []
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8('%i\n' % len(corpus)))
for doc in corpus:
words = []
for wordid, value in doc:
if abs(int(value) - value) > 1e-6:
truncated += 1
words.extend([utils.to_unicode(id2word[wordid])] * int(value))
offsets.append(fout.tell())
fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
if truncated:
logger.warning(
"List-of-words format can only save vectors with integer elements; "
"%i float entries were truncated to integer value", truncated
)
return offsets
def docbyoffset(self, offset):
"""
Return the document stored at file position `offset`.
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())
@property
def id2word(self):
return self._id2word
@id2word.setter
def id2word(self, val):
self._id2word = val
self.word2id = utils.revdict(val)
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
d95b85d157c5e47a6a21e27eabf4525b5afea52e | d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496 | /com.CheckProofing/Test_Campaign_2021/scripts/python/Page/extract_images.py | 3b80f87b21db865b5932d0164080417339bd2fe7 | [] | no_license | ahmed-test001/python | 21a27248c4571a13c0ed4dccab256aede1beea3a | eab59b9a54fae1a51fbc18c391599eb3b0e28b3d | refs/heads/master | 2023-03-10T21:00:54.634028 | 2021-02-27T05:31:58 | 2021-02-27T05:31:58 | 342,778,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | # import json
# import re
# import os
# import sys
# import requests
# import pytesseract
# # import cv2
# from urllib.parse import urlparse
#
# from bs4 import BeautifulSoup
# from selenium.webdriver.support.wait import WebDriverWait
#
# from Test_Campaign_2021.scripts.python.Util_Data import ReadConfig
#
#
# class extract_images:
# output_dir = "../../data/output/"
#
# def __init__(self, driver):
# self.driver = driver
# self.wait = WebDriverWait(self.driver, 10)
#
# def check_key_exist(self, test_dict, key):
# try:
# value = test_dict[key]
# return True
# except KeyError:
# return False
#
# def extract_images(self):
#
# with open(ReadConfig.readFilePathData('FilePaths', 'url_list'), 'w') as f:
# urls = f.read().splitlines()
# contents = urls[0]
# input_html_file = BeautifulSoup(contents, 'html.parser')
# f.close()
# print("#################### Extract Images Start ####################")
# pytesseract.pytesseract.tesseract_cmd = (r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe")
#
# png_images = input_html_file.find_all('img', {'src': re.compile('.png')})
# jpg_images = input_html_file.find_all('img', {'src': re.compile('.jpg')})
# ahref_links = []
# hyper_links_json = {}
# for image in jpg_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
# # if image['alt'] == "":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data.strip()
# ahref_links.append(d_cols)
#
# for image in png_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
#
# # if image['alt']=="":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data
# ahref_links.append(d_cols)
#
# # hyper_links_json['alerts'] = ahref_links
# # final_hyber_links = json.dumps(hyper_links_json, indent=4, sort_keys=False, ensure_ascii=False)
# # file = open(self.output_dir+"proof_files/" + "abc" + ".json", "w", encoding="utf-8")
# # # file = open(self.output_dir+"proof_files/" + self.output_file_name + '_' + '-'.join(self.filename.split('-')[-3:-1]) + ".json", "w", encoding="utf-8")
# # # file.write(final_hyber_links)
# # file.close()
# print("#################### Extract Images End ####################")
| [
"ahmedu.ferdous@gmail.com"
] | ahmedu.ferdous@gmail.com |
f089bbc45dcc07a87ffe5f88eff17e96d474e1a7 | a34272af011a08ba255f7e908423dae2ac6ecbb9 | /src/enums/token_type.py | 496f3af2aba40cfe55c2d691df5dd372f53e1cc8 | [] | no_license | CoderK/simple-interperter-language-study | 2fa3bf04854cc6ffa7ec78dcc057d9586f119054 | 7e4968eb67aa636d33ecc8dc54b18de29e2dfeb1 | refs/heads/master | 2021-01-13T16:04:10.476705 | 2017-01-07T10:33:51 | 2017-01-07T10:33:51 | 76,764,210 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from enum import Enum
class TokenType(Enum):
IDENTIFIER = 0,
NUMBER = 1,
MINUS = 2,
PLUS = 3,
MULTIPLY = 4
DIVISION = 5,
MOD = 6,
POWER = 7,
L_PAREN = 8,
R_PAREN = 9,
COMMA = 10,
CALL = 11,
FUNCTION = 12,
ASSIGN = 13,
END = 14
| [
"jeokrang@hanmail.net"
] | jeokrang@hanmail.net |
daf5f4f3813ecc08cd6a129e3d7837d01f694731 | 67827e1b58898eb261e0552288ca844ad54e800d | /FanFast.py | ef934bfa644c62eee825c55e1b72409be3c2de7a | [] | no_license | mbutkereit/Speech-to-RIOT | 938aca104a17bf5f0c71efdfa96403c3821f37df | fbb205025a9e811f445aca4ea8555657601486e2 | refs/heads/master | 2021-01-22T12:25:56.789968 | 2017-05-29T09:35:00 | 2017-05-29T09:35:00 | 92,725,459 | 0 | 0 | null | 2017-05-29T09:30:03 | 2017-05-29T09:30:03 | null | UTF-8 | Python | false | false | 392 | py | import re
import setFan
import json
WORDS = ["FAN", "FAST"]
file = open("hostnamesFans.json")
for line in file:
content = json.loads(line)
hostname = content["fans"]
def handle(text, mic, profile):
setFan.setFan(hostname, 0, "FAST")
mic.say("Okay, I am turning the fan to a fast frequency")
def isValid(text):
return bool(re.search(r'\bFAN\b', text, re.IGNORECASE))
| [
"arnemt@web.de"
] | arnemt@web.de |
6c470d79e3d96a4854aec735c4058b4423218ec8 | 9f5509aea6fe3808f6a7f5ec876e8fc5e19df7f6 | /sdk_test.py | 8a055d819b551a7381df9173b561ba1356402951 | [
"MIT"
] | permissive | zebra-kangaroo/petulant-turtle | 7fddfb85cb5646ce25c220bf1b02f33ee60b27b7 | 83119e09460fc0de858466ad3cd69206ab1f502f | refs/heads/master | 2020-05-22T13:07:53.673917 | 2015-07-29T19:57:27 | 2015-07-29T19:57:27 | 39,913,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | from wepay import WePay
# CLIENT_ID of your app
CLIENT_ID = ''
# CLIENT_SECRET of your app
CLIENT_SECRET = ''
# Production, Stage, Stage-internal, VM
TARGET_ENVIRONMENT = 'Production'
# ACCESS_TOKEN of your app
ACCESS_TOKEN = ''
# ACCOUNT_ID of your app
ACCOUNT_ID = ''
# Internal calls, set to to True for making internal calls
INTERNAL_CALLS = False
# Create the wepay API instance
wepay = WePay(TARGET_ENVIRONMENT, ACCESS_TOKEN, INTERNAL_CALLS)
# Call /user
user_reps = wepay.call('/user')
print(user_reps)
# Call /app
params = {"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET}
app_reps = wepay.call('/app', params)
print(app_reps)
# Call /credit_card/create
params = {"client_id": CLIENT_ID, "user_name": "Bob Smith", "email": "test@example.com", "cc_number": "5496198584584769", "cvv": "123", "expiration_month": 4, "expiration_year": 2020, "address": {"address1": "test", "city": "test", "state": "CA", "country": "US", "zip": "94025"}}
call_reps = wepay.call('/credit_card/create', params)
print(call_reps)
# Call /credit_card GET
if 'credit_card_id' in call_reps:
params = {"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET, "credit_card_id": call_reps["credit_card_id"]}
call_reps = wepay.call('/credit_card', params)
print(call_reps)
# Call /checkout/create
if 'credit_card_id' in call_reps:
params = {"account_id": ACCOUNT_ID, "short_description": "Donation to Smith Cancer Fund", "long_description": "This is a donation to help Bob Smith get the treatment", "type": "DONATION", "reference_id": "abc123", "amount": "100.75", "currency": "USD", "app_fee": "5.5", "fee_payer": "payee", "auto_capture": "false", "payment_method_id": call_reps["credit_card_id"], "payment_method_type": "credit_card"}
call_reps = wepay.call('/checkout/create', params)
print(call_reps)
# Set up for Internal calls
INTERNAL_CALLS = True
wepay_internal = WePay(TARGET_ENVIRONMENT, ACCESS_TOKEN, INTERNAL_CALLS)
# Call /internal/user/sample
if 'user_id' in user_reps:
params = {"user_id": user_reps["user_id"], "app_id": app_reps['client_id']}
internal_resp = wepay_internal.call('/user/sample', params)
print(internal_resp)
| [
"sankate@wepay.com"
] | sankate@wepay.com |
9b3313edbcd0fb8a462b7c1e8f8fbf9fd7a9714c | b8c1ffa5c522907e5f935f317b0638d51e2fbf90 | /Movielens_user_clustering_fuzzy_cmeans.py | e4f1d00422a67a38c767072b0a5869558e6597da | [] | no_license | Vijeta141/Major-Project-2 | f2d7d94896081e246d8f12d058935f7da3f6be8c | 2883f5edcc0a3e9bd1ccebb03584562c2520a3ed | refs/heads/master | 2020-03-09T00:40:56.029431 | 2018-04-28T14:09:14 | 2018-04-28T14:09:14 | 128,494,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,236 | py | import numpy as np
import time
import pickle
import skfuzzy as fuzz
from movielens import *
from sklearn.metrics import mean_squared_error
user = []
item = []
rating = []
rating_test = []
n_users = 0
n_items = 0
# Load the movie lens dataset into arrays
def load_data():
global user
global item
global rating
global rating_test
d = Dataset()
d.load_users("data/u10.user", user)
d.load_items("data/u.item", item)
d.load_ratings("data/u10.base", rating)
d.load_ratings("data/u10.test", rating_test)
def create_rating_matrix():
global n_users
global n_items
n_users = len(user)
n_items = len(item)
user_rating = np.zeros((n_users, n_items))
for r in rating:
user_rating[r.user_id-1][r.item_id-1] = r.rating
return user_rating
# Finds the average rating for each user and stores it in the user's object
def find_avg_rating_per_user(user_rating):
for i in range(n_users):
rated = np.nonzero(user_rating[i]) #np.nonzero returns indices of the elements that are non-zero.
n = len(rated[0])
if n != 0:
user[i].avg_r = np.mean(user_rating[i][rated])
else:
user[i].avg_r = 0
def cluster_users(user_rating):
user_rating_transposed = np.transpose(user_rating)
cntr, u_orig, _, _, _, _, _ = fuzz.cluster.cmeans(user_rating_transposed, 6, 2, error=0.005, maxiter=300)
labels = list(np.argmax(u_orig, axis=0) + 1)
return labels
def guess(user_id, item_id, labels, user_rating):
cluster_number = labels[user_id]
indices = [i for i, x in enumerate(labels) if x == cluster_number]
y = []
for user in indices:
x = user_rating[user][item_id]
y.append(x)
y = list(filter((0.0).__ne__, y))
if len(y) == 0:
return 0.0
else:
max_r = max(y,key=y.count)
return max_r
def guess_weighted(user_id, item_id, labels, user_rating):
distance = {}
ratings = [1.0,2.0,3.0,4.0,5.0]
cluster_number = labels[user_id]
indices = [i for i, x in enumerate(labels) if x == cluster_number]
scores = [0,0,0,0,0]
for i in indices:
if not 'i' in distance :
sum_d = sum((user_rating[user_id][j] - user_rating[i][j]) for j in range(0,n_items))
# sum_f = sum_d ** 0.5
lamb = 1/ (sum_d ** 2)
distance[i] = lamb
for j in ratings:
for i in indices:
if user_rating[i][item_id] == j:
scores[int(j-1)] += distance[i]
max_s = max(scores)
return float(scores.index(max_s) + 1)
def predict_user_rating(labels, user_rating):
user_rating_copy = np.copy(user_rating)
for i in range(0, n_users):
for j in range(0, n_items):
if user_rating_copy[i][j] == 0:
time.sleep(0.00005)
user_rating_copy[i][j] = guess(i, j, labels, user_rating)
pickle.dump(user_rating_copy, open("user_rating_movie_user_kmeans.pkl", "wb"))
return user_rating_copy
def create_test_matrix():
test = np.zeros((n_users, n_items))
for r in rating_test:
test[r.user_id - 1][r.item_id - 1] = r.rating
return test
def calculate_error(test, predicted_rating, labels):
# Predict ratings for u.test and find the mean squared error
y_true = []
y_pred = []
f = open('test_movie_fuzzy.txt', 'w')
for i in range(0, n_users):
for j in range(0, n_items):
if test[i][j] > 0:
f.write("%d, %d, %.4f\n" % (i+1, j+1, predicted_rating[i][j]))
y_true.append(test[i][j])
y_pred.append(predicted_rating[i][j])
f.close()
print ("Mean Squared Error: %f" % mean_squared_error(y_true, y_pred))
def test_model(predicted_rating, labels):
test_matrix = create_test_matrix()
calculate_error(test_matrix, predicted_rating, labels)
def main():
load_data()
user_rating = []
user_rating = create_rating_matrix()
find_avg_rating_per_user(user_rating)
labels = cluster_users(user_rating)
predicted_rating = predict_user_rating(labels, user_rating)
test_model(predicted_rating,labels)
if __name__ == '__main__':
main()
| [
"shivaniszw_bt2k14@dtu.ac.in"
] | shivaniszw_bt2k14@dtu.ac.in |
2bbf02dd3f34c138ac35839901ab09c232b7628c | 0d86e9198b210c44190e265d723514b25c5554d2 | /api_exercise/exercise.py | e8f4356f2ca73fb12f999a5c49709ced989acc8f | [] | no_license | velivelinov/NextTechGirls | b076a4d1196a9907117f99e83c7600d31addd37e | 19db6d6a4f80a7f2796d88aafc1c4eb9bf20cb5b | refs/heads/master | 2021-09-10T02:24:18.949268 | 2018-03-20T17:55:28 | 2018-03-20T17:55:28 | 126,000,891 | 1 | 0 | null | 2018-03-20T11:10:09 | 2018-03-20T10:32:34 | Python | UTF-8 | Python | false | false | 596 | py | import api_functions
# Ask the user for input for what city to search for
city =
response_data = api_functions.make_request(city)
# Ask the user for what information they'd like to find out
print('Would you like to find out: ')
print('1. Information about the city')
print('2. Coordinates of the city')
print('3. The country the city is in')
choice =
# Use the response_data dictionary in order to get the information needed!
# If the user's choice is 1 - use the 'intro' value
# If the user's choice is 2 - use the 'coordinates' value
# If the user's choice is 3 - use the 'country_id' value | [
"vevelinov@hotels.com"
] | vevelinov@hotels.com |
b9063f096b96d5a75a310bc8ea0a8636adf03b5a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /BHBXNfeMsA43d8Tys_22.py | a4efdfcb90caae2db151d39c9a348261e7d74a67 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | """
As far as we currently know, approximations for the mathematical constant
**pi** (π) in the history of mathematics started surfacing with Ancient
Babylonians, who found its correct truncation up to 1 decimal place. During
the 5th century, the Chinese mathematician Zu Chongzhi raised it to 7 decimal
places and from the 18th century onwards the number of correct pi decimal
places has seen steady growth.
Since the middle of the 20th century, the approximation of pi has been the
task of electronic digital computers. During the 2019 Pi Day on the 14th of
March, the Japanese computer scientist _Emma Haruka Iwao_ released the
currently most accurate value of pi with more than 31.4 trillion digits, using
170 Terabytes of data.
Your task is to create a function that takes a positive integer `n` as an
argument and returns the value of **pi** with its first `n` decimal digits.
Taylor series are usually used to get finer approximations. To make this
challenge approachable to anyone, the following formula is suggested:

### Examples
pi(1) ➞ "3.1"
pi(2) ➞ "3.14"
pi(30) ➞ "3.141592653589793238462643383279"
### Notes
N/A
"""
def pi(n):
i = 1
p = x = 3 * 10 ** (n + 10)
while x:
x = x * i // ((i + 1) * 4)
i += 2
p += x // i
return '3.' + str(p // 10 ** 10)[1:]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
483eaaf3e8360889b820857d67b3220540563387 | 0e06df5e10ebfd5d508afa7848b645fb0f9aa503 | /image_track.py | 57e88dc27b7c5549922e36733ea2ae401b8178c7 | [] | no_license | jrome5/Husky-Code | 47c4a5cecec8c402718177c84e8c9d442dfb1df6 | 8b89b56c801a20b7943491fdee7e24fd10398b59 | refs/heads/master | 2020-04-13T21:31:31.991570 | 2018-12-28T23:52:22 | 2018-12-28T23:52:22 | 163,458,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,105 | py | #!/usr/bin/env python
import roslib
#roslib.load_manifest('stereo_color_tracker')
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String, Header
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped, Point
from cv_bridge import CvBridge, CvBridgeError
# import IPython
import tf
import time
class image_track:
def __init__(self):
self.left_point = [0,0]
self.right_point = [0,0]
self.size = 1000 #hydrant 1m, box 14.5cm
# self.image_pub = rospy.Publisher("left_tracker_image",Image, queue_size=5)
self.point_left = rospy.Publisher("left_point", PointStamped, queue_size=5)
self.point_right = rospy.Publisher("right_point", PointStamped, queue_size=5)
self.image_pub_left = rospy.Publisher("/camera/left/image_masked",Image, queue_size = 5)
self.point_pub3 = rospy.Publisher("point3", PointStamped, queue_size=5)
# cv2.namedWindow("Image window", 1)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/bumblebee2/left/image_raw",Image,self.left_callback)
self.image_sub = rospy.Subscriber("/bumblebee2/right/image_raw",Image,self.right_callback)
# Trackbar stuff
# Green Marker
# self.lower_threshold = np.array([66, 97, 180])
# self.upper_threshold = np.array([96, 222, 255])
# Green cloth on a plastic stick
# self.lower_threshold = np.array([37, 64, 73])
# self.upper_threshold = np.array([63, 149, 233])
# Green Marker 3d print table nov 26
#self.lower_threshold = np.array([60, 96, 131])
# self.upper_threshold = np.array([84, 221, 255])
#(red hydrant)
self.lower_threshold = np.array([0, 60, 0])
self.upper_threshold = np.array([3, 255, 255])
self.f = 788.4085367665094
self.b = 0.12
# 1280 x 960 image
# self.center_x = (1280.0/2.0) # half x pixels
# self.center_y = (960.0/2.0) # half y pixels
# 640 x 480
#self.center_x = (640.0/2.0) # half x pixels
#self.center_y = (480.0/2.0) # half y pixels
self.leftinfo = np.matrix([[788.4085367665094, 0.0, 512.5], [0.0, 788.4085367665094, 384.5], [0.0, 0.0, 1.0]])
self.rightinfo = np.matrix([[788.4085367665094, 0.0, 512.5], [0.0, 788.4085367665094, 384.5], [0.0, 0.0, 1.0]])
self.leftproj = np.matrix([[788.4085367665094, 0.0, 512.5, -0.0], [0.0, 788.4085367665094, 384.5, 0.0], [0.0, 0.0, 1.0, 0.0]])
self.rightproj = np.matrix([[788.4085367665094, 0.0, 512.5, -94.60902441198112], [0.0, 788.4085367665094, 384.5, 0.0], [0.0, 0.0, 1.0, 0.0]])
## cv2.namedWindow("Control"); # Threshold Controller window
# cv2.namedWindow("Thresholded Image", cv2.CV_WINDOW_AUTOSIZE); # Threshold image window
def left_callback(self,data):
## print("left")
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError, e:
print e
# IPython.embed()
# Get HSV image
hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
frame_threshed = cv2.inRange(hsv, self.lower_threshold, self.upper_threshold)
imgray = frame_threshed
ret, thresh = cv2.threshold(frame_threshed, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_in = np.argmax(areas)
cnt = contours[max_in]
x, y, w, h = cv2.boundingRect(cnt)
cv2.putText(cv_image,"X: %s Y:%s" %(x,y), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,0)) # bgr
cv2.rectangle(cv_image, (x,y), (x+w, y+h), (0,255,0),2)
distance = (self.f*x/self.size)/100 #meters
self.left_point = [x, y, distance]
cv2.imshow("Thresholded Image", cv_image)
k = cv2.waitKey(3) & 0xFF
if k == 113 or k == 27: # Escape key = 27, 'q' = 113
rospy.signal_shutdown("User Exit")
try:
self.image_pub_left.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
except CvBridgeError, e:
print e
def right_callback(self,data):
## print("left")
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError, e:
print e
# IPython.embed()
# Get HSV image
hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
frame_threshed = cv2.inRange(hsv, self.lower_threshold, self.upper_threshold)
imgray = frame_threshed
ret, thresh = cv2.threshold(frame_threshed, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_in = np.argmax(areas)
cnt = contours[max_in]
x, y, w, h = cv2.boundingRect(cnt)
cv2.putText(cv_image,"X: %s Y:%s" %(x,y), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,0)) # bgr
cv2.rectangle(cv_image, (x,y), (x+w, y+h), (0,255,0),2)
distance = (self.f*x/self.size)/100 #meters
self.right_point = [x, y, distance]
self.postPoint3()
def postPoint3(self):
print(self.left_point, self.right_point)
if(self.left_point == [0,0] or self.right_point == [0,0] ):
return
z = (self.f*self.b)/(self.left_point[0]-self.right_point[0])
x = self.left_point[0]*(z/self.f)
y = self.left_point[1]*(z/self.f)
print(x,y,z)
point = PointStamped(header=Header(stamp=rospy.Time.now(),
frame_id='/map'),
point=Point(x,y,z))
self.point_pub3.publish(point)
def postPointleft(self):
point = PointStamped(header=Header(stamp=rospy.Time.now(),
frame_id='/map'),
point=Point(self.left_point))
self.point_left.publish(point)
def postPointright(self):
point = PointStamped(header=Header(stamp=rospy.Time.now(),
frame_id='/map'),
point=Point(self.right_point))
self.point_right.publish(point)
def main(args):
rospy.init_node('image_track', anonymous=True)
ic = image_track()
try:
rospy.spin()
except KeyboardInterrupt:
print "Shutting down"
cv2.destroyAllWindows()
print "Finished."
if __name__ == '__main__':
main(sys.argv)
| [
"noreply@github.com"
] | noreply@github.com |
92f6925b2a9cfb31a62f32e59f35f03425e5c4ee | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/io/matlab/tests/test_mio.py | a2fff9f37f188018118bed9ef6dc4f9d6725e5b8 | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:8ddd2d36df500761f2d9af0be22308dd2450ebd281c3b0e945bc89b26ebb413d
size 42136
| [
"sumitkutty37@gmail.com"
] | sumitkutty37@gmail.com |
ca0e505b915d355eece8230b7ee141a50d9ecacc | 18dde3388c5258659393207a5afab1eb8f7483ab | /Day_6/zad_1.py | 2dfed925f18221bc9a1f6e09b29e39b4ad64d664 | [] | no_license | msiemieniukmorawski/python | a3e9e8eda90ddf5981962e66d849b43dcb77d2c3 | 13c65d21754d894ab1342627be61794e9e29763f | refs/heads/master | 2020-03-28T16:13:19.226610 | 2018-11-11T22:37:44 | 2018-11-11T22:37:44 | 148,665,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # Zlicz wszystkie wystąpienia liter w stringu "tekst"
tekst = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."
) | [
"msiemieniukmorawski@gmail.com"
] | msiemieniukmorawski@gmail.com |
63c07cb2b9abb703e51ca131751cea808ad20a08 | 8e26d8a217003728e6ad8082009cc367c920aef4 | /ryan_project/polls/models.py | e255587c550a8530ee32a39b0410a406358a6292 | [] | no_license | ryan-vong/ryan_django | 2f3ebaf64806ac69f00890a69f7ef5a7a36ffb4b | 5d6ae854c84edcd919d881553dceba1510932981 | refs/heads/master | 2020-05-20T01:18:09.414735 | 2019-05-07T02:33:05 | 2019-05-07T02:33:05 | 185,306,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| [
"ryan.vong88@gmail.com"
] | ryan.vong88@gmail.com |
72fb6a38c5f5d698ef3de0e95fd431195f0c6c1c | 4522fc52bc43654aadd30421a75bae00a09044f0 | /riley/dev.py | 1c924685ce9976e08ff5b678bf63dcb402aa2ce4 | [] | no_license | qesoalpe/anelys | 1edb8201aa80fedf0316db973da3a58b67070fca | cfccaa1bf5175827794da451a9408a26cd97599d | refs/heads/master | 2020-04-07T22:39:35.344954 | 2018-11-25T05:23:21 | 2018-11-25T05:23:21 | 158,779,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | from dict import Dict as dict
import os
import os.path
from isodate import datetime_isoformat
from datetime import datetime
from pathlib import Path
path_root = Path(r'/home/picazo/anelys')
if os.path.sep != '/':
os.path.sep = '/'
from katherine import d6
def get_datetime(timestamp):
return datetime_isoformat(datetime.fromtimestamp(timestamp))
def parse_dir(dirpath):
dir = Dict()
dir.children = list()
dir.path = '/' + dirpath + '/'
dir.type = 'riley/directory'
if path_relative:
paths = os.listdir(dirpath)
if dirpath != '.':
paths = [os.path.join(dirpath, path).replace('\\', '/') for path in paths]
else:
paths = [path.replace('\\', '/') for path in paths]
for path in paths:
if os.path.isdir(path) and os.path.basename(path) not in ['__cache__', '__pycache__']:
dir.children.append(parse_dir(path))
elif os.path.isfile(path) and os.path.splitext(path)[1] in ['.py', '.pyw']:
f = open(path, 'rb')
import hashlib
md5_hashlib = hashlib.md5()
for chunk in iter(lambda: f.read(4096), b''):
md5_hashlib.update(chunk)
f.close()
file = Dict()
file.md5 = md5_hashlib.hexdigest().upper()
file.path = '/' + path
file.size = os.path.getsize(path)
file.modified_datetime = get_datetime(os.path.getmtime(path))
file.type = 'riley/file'
dir.children.append(file)
return dir
os.chdir(path_root)
tree = parse_dir('.')
def get_locals(dir):
rr = [child for child in dir.children if child.type == 'riley/file']
for m in [child for child in dir.children if child.type == 'riley/directory']:
rr.extend(get_locals(m))
from copy import deepcopy
m = deepcopy(m)
for k in list(m.keys()):
if k not in ['path', 'type']:
del m[k]
rr.append(m)
return rr
locals = get_locals(tree)
# cursor_db = db_mariadb.cursor()
# from pprint import pprint
#
# cursor_db = db_mariadb.cursor(pymysql.cursors.DictCursor)
# cursor_db.execute('select filepath as path, md5, size, modified_datetime from riley.file;')
#
# remotes = [Dict(file) for file in cursor_db]
#
# for file in remotes:
# file.modified_datetime = datetime_isoformat(file.modified_datetime)
#
#
#
# for katherine in locals:
# if 'path' in katherine:
# if katherine.path[0] != '/':
# katherine.path = '/' + katherine.path
#
# from pymongo import MongoClient
# db_mongo_local = MongoClient(port=27020)
# db_riley = db_mongo_local.get_database('riley')
# coll_snapshot_sync = db_riley.get_collection('snapshot_sync')
#
# snapshot = coll_snapshot_sync.find_one(projection={'_id': False},
# sort=[('datetime', -1)])
# if snapshot is not None:
# snapshots = snapshot.snapshots
# else:
# snapshots = None
#
# persisted_path = [file.path for file in persisted]
# locals_path = [file.path for file in locals]
#
#
# def persist_file(file):
# pass
#
# pprint(locals_path)
# pprint(persisted_path)
#
#
# snapshots = Dict({'snapshot': locals, 'datetime': datetime_isoformat(datetime.now())}) | [
"qesoalpe@gmail.com"
] | qesoalpe@gmail.com |
26772736bdf082f38dd3107781a34151c0e314de | 16cb24621f0935ad25589584ac960979ce95bcfa | /synth.py | 01e6e38ba9a56b04438a856214f9a0cb0b609c51 | [
"Apache-2.0"
] | permissive | Phaneendhr/dialogflow-python-client-v2 | 8a1851214579ebd8f0a29ef193297a3e43d85641 | 8c9c8709222efe427b76c9c8fcc04a0c4a0760b5 | refs/heads/master | 2020-05-19T01:34:19.044642 | 2018-12-18T23:44:33 | 2018-12-18T23:44:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
versions = ['v2beta1', 'v2']
for version in versions:
library = gapic.py_library('dialogflow', version)
s.move(
library,
excludes=[
'google/**/*', 'setup.py', 'README.rst', 'docs/index.rst', 'nox.py'])
s.move(
library / f'google/cloud/dialogflow_{version}',
f'dialogflow_{version}')
# Due to dialogflow being unique to the other google-cloud-* libraries,
# a decent number of edits need to be done to correct naming and namespaces
docs_paths = ['docs/**/*.rst', 'docs/conf.py']
s.replace(docs_paths, 'google-cloud-dialogflow', 'dialogflow')
s.replace(docs_paths, 'google.cloud.dialogflow', 'dialogflow')
code_paths = ['tests/unit/gapic/**/*.py',
f'dialogflow_{version}/**/*.py']
s.replace(
code_paths, 'import google.cloud.dialogflow', 'import dialogflow')
s.replace(code_paths, 'from google.cloud\.', 'from ')
s.replace(code_paths, 'from google.cloud import', 'import')
s.replace(code_paths, 'google-cloud-dialogflow', 'dialogflow')
s.replace(code_paths, "'-dialogflow'", "'dialogflow'")
s.replace(
code_paths,
"(Returns:\n\s+)([a-zA-Z]+Client:)",
f"\g<1>dialogflow_{version}.\g<2>")
s.replace(
code_paths,
'(`Dialogflow documentation <.*?>`)_\.',
'\g<1>__.')
# Unexpected Indentation: https://github.com/googleapis/gapic-generator/issues/2157
# For now strip this example.
s.replace(f'dialogflow_{version}/gapic/agents_client.py',
'Example for.*\n\s+<pre>.*\n(.*\n)+?.*?</pre>', '')
# Some docstrings have oddly placed literal markers
s.replace(
[f'dialogflow_{version}/gapic/entity_types_client.py',
f'dialogflow_{version}/gapic/intents_client.py'],
"^\s+::\n\n",
"")
# Some files are missing the appropriate utf-8 header
# -*- coding: utf-8 -*-
s.replace(
["dialogflow_v2beta1/proto/session_pb2.py",
'dialogflow_v2beta1/proto/intent_pb2_grpc.py',
'dialogflow_v2/proto/intent_pb2_grpc.py',
'dialogflow_v2/proto/session_pb2.py',
],
"# Generated by the .*",
"# -*- coding: utf-8 -*-\n\g<0>")
s.replace(
['dialogflow_v2beta1/gapic/intents_client.py',
'dialogflow_v2beta1/gapic/sessions_client.py',
'dialogflow_v2/gapic/intents_client.py',
],
"# Copyright 2018 Google LLC",
"# -*- coding: utf-8 -*-\n\g<0>")
# Docstring has an extra '\' at the end of it '}\" \'
s.replace(
'dialogflow_v2/gapic/agents_client.py',
r'}\\\" [\\]\n(\s+retry \(Optional)',
'}\\"\n\g<1>')
s.replace('dialogflow_v2/proto/agent_pb2.py', ':math:', '')
s.replace('dialogflow_v2/proto/agent_pb2.py', ':raw-latex:', '')
| [
"noreply@github.com"
] | noreply@github.com |
b00ed5b05858e40a58a02521f0e18f41ce5fa08c | abdc9c72f2afc04bb8aedc7245837a5eb0febd6a | /scripts/led_controle.py | aa7400ae18c1e0fca90a57abcfd9e408e01c531c | [
"MIT"
] | permissive | TomiyaMorita/robosys | 90fb5454ce52c405cfed7d0c4ab3691c6c75a374 | 70dc21fd6db3f6853e7bdadb99cfff954697df99 | refs/heads/master | 2020-04-16T18:41:44.460028 | 2019-02-05T15:51:54 | 2019-02-05T15:51:54 | 165,830,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | #!/usr/bin/env python
import RPi.GPIO as GPIO
import rospy
import std_msgs.msg
GPIO.setmode(GPIO.BOARD)
gpio_list=[13,15,16,18,22]
for i in range(5):
GPIO.setup(gpio_list[i], GPIO.OUT)
def callback(data, id):
rospy.loginfo("[ID:"+str(id)+"] : " + str(data.data))
if(data.data==1):
GPIO.output(gpio_list[id-1],True)
else:
GPIO.output(gpio_list[id-1],False)
def listener():
rospy.init_node('listener', anonymous=True)
led1 = rospy.Subscriber("/led1", std_msgs.msg.Int8, callback, callback_args=1)
led2 = rospy.Subscriber("/led2", std_msgs.msg.Int8, callback, callback_args=2)
led3 = rospy.Subscriber("/led3", std_msgs.msg.Int8, callback, callback_args=3)
led4 = rospy.Subscriber("/led4", std_msgs.msg.Int8, callback, callback_args=4)
led5 = rospy.Subscriber("/led5", std_msgs.msg.Int8, callback, callback_args=5)
rospy.spin()
if __name__ == '__main__':
listener()
| [
"noreply@github.com"
] | noreply@github.com |
d6138c99a587bcfea289cfd39bd8931723725038 | e2a142562ab3f322aedc75d613e6ccd72c00f942 | /users/migrations/0002_user_is_staff.py | 064721d104529e3a4714e94a32fe65b9370a7ad4 | [] | no_license | pavanasrivinaya/Todobackedserver_django | 5f8da0cfec35e53cd37d4a69265e0108c10291d6 | ffdf71c248ef72805136ea0cb2805300cf0226c2 | refs/heads/main | 2023-01-27T15:09:46.208059 | 2020-12-06T17:34:14 | 2020-12-06T17:34:14 | 318,553,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # Generated by Django 3.1.4 on 2020-12-04 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='is_staff',
field=models.BooleanField(default=False),
),
]
| [
"pavanasri.nyros@gmail.com"
] | pavanasri.nyros@gmail.com |
432c01b9bc0749e080f5030723946eea795b05b6 | eeade223e39130cac09fb4907da6410101af5935 | /setup.py | 3e74c7d16553cc22209fc859a2c55f4468a03da1 | [] | no_license | TrendingTechnology/jaxfg | 67cac95f7e37c2eac75574fa8473b89cc222137e | 7f19668b344944be196e6b61fdc36f1441bac819 | refs/heads/master | 2023-06-20T17:20:43.928788 | 2021-07-30T23:24:42 | 2021-07-31T00:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | from setuptools import find_packages, setup
setup(
name="jaxfg",
version="0.0",
description="Factor graphs in Jax",
url="http://github.com/brentyi/jaxfg",
author="brentyi",
author_email="brentyi@berkeley.edu",
license="BSD",
packages=find_packages(),
package_data={"jaxfg": ["py.typed"]},
python_requires=">=3.7",
install_requires=[
"datargs",
"jax>=0.2.13",
"jaxlib",
"jaxlie>=1.0.0",
"jax_dataclasses>=1.0.0",
"overrides",
"scikit-sparse",
"termcolor",
"tqdm",
"typing_utils", # We can phase this out if we drop support for Python 3.7
"matplotlib",
],
extras_require={
"testing": [
"pytest",
# "pytest-cov",
# "hypothesis",
# "hypothesis[numpy]",
],
"type-checking": [
"mypy",
"types-termcolor",
],
},
)
| [
"yibrenth@gmail.com"
] | yibrenth@gmail.com |
ef3444dcd2128c8682185a7fb5f36cc1e1f4760d | 7620709ca64ef49b4fe4721296e4c95a31cb2bb9 | /import_requests1.py | 7472aa177dfc9f81245942da8970e487a7d584de | [] | no_license | simma1/coder-course | aefefa59a18263dc42101db764b98a2faae476c5 | 326931d6cfd63946ed2465bad5ea6af10719fc81 | refs/heads/master | 2022-04-17T10:10:16.079243 | 2020-04-16T06:22:41 | 2020-04-16T06:22:41 | 256,127,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import requests, bs4
for counter in range (1900,2000):
res = requests.get('https://cineplex.com.au/movie/' + str(counter))
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
body = soup.body
try:
h2 = body.select(".movie-header > h2")[0]
h2str = str(h2)[4:-5]
except Exception as e:
print('movie locked in disney vault')
else:
print(h2str)
with open ('resul2.txt', 'a+') as file:
file.write(h2str)
file.write('\n')
| [
"spi591@icloud.com"
] | spi591@icloud.com |
2451845e748f9377ad8a74317dffb70d9e17c562 | 4d6f41419c51018ecc76ec0e959686980dcd90a5 | /Teacher/models.py | dd1980e73ef0a0ce3dbb17fabfec69509f8db221 | [] | no_license | TangyeWeisuo/SoftwareEngineering | c200a56be6f0e98afa5e195e5657343df83245f3 | 1fa02a155148bc4173abafa494e665e20fa32e78 | refs/heads/master | 2021-01-18T09:15:31.843868 | 2015-12-25T16:07:23 | 2015-12-25T16:07:23 | 43,533,462 | 1 | 4 | null | 2015-12-25T10:29:20 | 2015-10-02T03:04:39 | Python | UTF-8 | Python | false | false | 844 | py | # -*- coding: cp936 -*-
from django.db import models
# Create your models here.
class Lab(models.Model):
# The model of laboratory.
name = models.CharField(max_length=20)
introduction = models.TextField()
class Teacher(models.Model):
# The model of teacher.
username = models.CharField(max_length=10)
password = models.CharField(max_length=15)
email = models.EmailField(max_length=20,null=True)
name = models.CharField(max_length=30,null=True)
age = models.PositiveIntegerField(verbose_name=0, null=True)
gender = models.BooleanField(default=1)
photo = models.ImageField(upload_to='img', null=True)
introduction = models.TextField(null=True)
foundation = models.TextField(null=True)
subject = models.CharField(max_length=10, null=True)
lab = models.ForeignKey(Lab, null=True)
| [
"zhaoxihang@outlook.com"
] | zhaoxihang@outlook.com |
ea0247a08d3dbfcc08f7339be1353955119ac626 | f5ef25c84e9b4846f98d520bc9a20d20b3d1b65c | /OOP/oop3.py | e7b6dedf4ec707a9c8abd83d79293c2b25573e9b | [] | no_license | amiraHag/python-basic-course2 | 45757ffdfa677c2accd553330cd2fd825208b0aa | 1fbfd08b34f3993299d869bd55c6267a61dc7810 | refs/heads/main | 2023-03-31T06:48:11.587127 | 2021-03-30T03:43:10 | 2021-03-30T03:43:10 | 327,271,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # --------------------------------------------------------------------
# -- Object Oriented Programming => Instance Attributes and Methods --
# --------------------------------------------------------------------
# Self: Point To Instance Created From Class
# Instance Attributes: Instance Attributes Defined Inside The Constructor
# -----------------------------------------------------------------------
# Instance Methods: Take Self Parameter Which Point To Instance Created From Class
# Instance Methods Can Have More Than One Parameter Like Any Function
# Instance Methods Can Freely Access Attributes And Methods On The Same Object
# Instance Methods Can Access The Class Itself
# -----------------------------------------------------------
class Member:
def __init__(self, first_name, middle_name, last_name):
self.fname = first_name
self.mname = middle_name
self.lname = last_name
member_one = Member("Amira", "Mustafa", "HM")
member_two = Member("Ahmed", "Hag", "Imam")
member_three = Member("Sara", "HI", "Mustafa")
# print(dir(member_one))
print(member_one.fname, member_one.mname, member_one.lname)
print(member_two.fname)
print(member_three.fname)
| [
"amira071846@feng.bu.edu.eg"
] | amira071846@feng.bu.edu.eg |
d12232dd22a9c1102cc8b4febd5d24e237ee5226 | 107afe63efb931157c104cf091377a22e2e363c9 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/node_modules/bufferutil/build/config.gypi | 82fa05534d4440ef6db3368044369c047b12cdfe | [
"MIT"
] | permissive | alejandro91dc/buscaminas | be6e1aed6a6be866ded3bc99545685a96b6a5584 | b6745efb8a77f8a8f5fa59d2d8e5d5f85150cd60 | refs/heads/master | 2021-01-10T14:27:25.458720 | 2016-04-14T02:19:53 | 2016-04-14T02:19:53 | 55,897,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,361 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.25",
"host_arch": "x64",
"icu_small": "false",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_release_urlbase": "",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": 0,
"want_separate_host_toolset": 0,
"nodedir": "/root/.node-gyp/4.2.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.14.7 node/v4.2.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/root/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/root/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "4.2.1",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/root/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
| [
"diazcarmona.alejandro91@gmail.com"
] | diazcarmona.alejandro91@gmail.com |
4f12be37afae4fbcb6355416aaebcf9fffa74a72 | 368a8bf1834a19bb6b330cdad4caeccd5687185f | /unsupervised_circle_location.py | c85a16cfbec61a11150bc379909ff72e2a851343 | [] | no_license | gr-b/unsupervised_pixelwise_reproduction_loss_poc | 0d2522f7a206ac356b8ee121a73e413e6d2084ad | d7e968c2840f385b6d7776bdf037d7faa0316777 | refs/heads/master | 2020-05-06T15:47:26.053975 | 2019-04-09T15:03:08 | 2019-04-09T15:03:08 | 180,208,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,545 | py | import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import Input, Dense, Dropout, Lambda, BatchNormalization, Activation
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow.keras import losses
from tensorflow.keras.callbacks import Callback
import tensorflow as tf
class Progress(Callback):
    """Keras callback that, after every batch, predicts the circle center
    for one fixed target image so training progress can be inspected.

    :param model: the compiled Keras model being trained.
    """

    def __init__(self, model):
        # Fixed: the original never called the base Callback initializer,
        # so Keras-side bookkeeping attributes were left unset.
        super().__init__()
        self.model = model
        # One fixed (image, center) pair reused for every progress check.
        self.target_img, self.target_center = generate_data(w, h, 1)
        self.target_img = self.target_img[0].reshape(w * h,)
        self.target_center = self.target_center[0]

    def on_batch_end(self, batch, logs=None):
        # `logs=None` instead of a mutable `{}` default (the dict was never
        # used, so this is behavior-neutral).
        # NOTE(review): this uses the module-level `model`, not self.model.
        result = model.predict(np.array([self.target_img]))
        x, y = result[0]
        #print("Target center: " + str(self.target_center) + " Predicted: " + str((x, y)))
from PIL import Image
def show(image):
    """Render *image* as a grayscale plot and block until the window closes."""
    display_options = {'cmap': 'gray'}
    plt.imshow(image, **display_options)
    plt.show()
r = 25  # nominal circle radius (not used by the distance-field generators below)
batchSize = 1  # training batch size; the custom loss assumes a single sample
w, h = 100, 100  # image width and height in pixels
# Generates one distance-field image per (cx, cy) pair for the given
# width and height; the circle centers are supplied by the caller.
def generate_circle_image(w, h, cx, cy):
    """Render a batch of distance-field "circle" images.

    For each center pair (cx[i], cy[i]) the result holds an (h, w) array
    whose pixel (x, y) equals ((x - cx[i])**2 + (y - cy[i])**2) / (2 * 100**2).
    Returns an array of shape (len(cx), h, w).
    """
    xs, ys = np.meshgrid(np.arange(0, w), np.arange(0, h))
    batch = cx.shape[0]
    # Replicate the coordinate grids per center so the transposed
    # subtraction below broadcasts over the batch axis.
    xs = np.stack([xs] * batch)
    ys = np.stack([ys] * batch)
    dx2 = ((xs.T - cx).T) ** 2
    dy2 = ((ys.T - cy).T) ** 2
    return ((dx2 + dy2).T / (2 * (100 ** 2))).T
def generate_single_circle_image(w, h, cx, cy):
    """Render one (h, w) distance-field image for the center (cx, cy)."""
    xs, ys = np.meshgrid(np.arange(0, w), np.arange(0, h))
    squared_distance = (xs - cx) ** 2 + (ys - cy) ** 2
    return squared_distance / (2 * (100 ** 2))
def generate_data(w, h, n):
    """Sample *n* random centers inside a w x h frame and render them.

    Returns (images, centers): images has shape (n, h, w) and centers has
    shape (n, 2) holding integer (cx, cy) pairs.
    """
    centers = np.array(
        [[np.random.randint(0, w), np.random.randint(0, h)] for _ in range(n)]
    )
    images = generate_circle_image(w, h, centers[:, 0], centers[:, 1])
    return images, centers
n = 2000  # number of (identical) training samples
x_train, y_train = generate_data(w, h, n)
# Deliberate overfit setup: replicate the FIRST sample n times so the
# network only ever sees one image/center pair.
x_train = np.array([x_train[0]]*n)
y_train = np.array([y_train[0]]*n)
# Module-level coordinate grids captured by the custom loss below.
xgrid, ygrid = np.meshgrid(np.arange(0, w), np.arange(0, h))
#xgrid = np.stack([xgrid]*batchSize)
#ygrid = np.stack([ygrid]*batchSize)
def pixelwise_reproduction_loss(y_true, y_pred):
    """Custom Keras loss: rebuild the distance-field image implied by the
    predicted center and compare it pixel-wise with the input image.

    y_true: flattened target image(s), shape (w*h,) after reshape.
    y_pred: predicted centers, shape (batch, 2).
    NOTE(review): the reshape to (w*h,) only works for batch size 1 --
    consistent with batchSize = 1 above; confirm before changing it.
    """
    cx, cy = y_pred[:,0], y_pred[:,1]
    # Fresh backend variables are created from the module-level grids on
    # every call.
    x_grid, y_grid = K.variable(value=xgrid), K.variable(value=ygrid)
    xcomp = (K.transpose(K.transpose(x_grid) - cx))**2
    ycomp = (K.transpose(K.transpose(y_grid) - cy))**2
    circle = K.transpose(K.transpose(xcomp + ycomp))
    # Same normalisation as generate_circle_image: /(2 * 100**2).
    circle = (circle / (2*(100**2)))
    circle = K.reshape(circle, (w*h, ))
    mse = K.mean(((circle-y_true)**2))
    return mse
def model():
    """Build and compile the center-regression network.

    Input: a flattened w*h image vector.
    Output: an (x, y) center, rescaled from [0, 1] to pixel range [0, w].
    """
    net = Sequential()
    net.add(Dense(768, input_shape=(w*h,), activation="relu"))
    net.add(Dropout(0.2))
    net.add(Dense(256, activation='relu'))
    net.add(Dense(128, activation='relu'))
    net.add(Dense(32, activation='relu'))
    # Sigmoid keeps both outputs in [0, 1]; the Lambda scales to pixels.
    net.add(Dense(2, activation='sigmoid'))
    net.add(Lambda(lambda x: x*w))
    net.compile(loss=pixelwise_reproduction_loss, optimizer='adam')
    return net
model = model()  # rebinds the name from the builder function to the network
print(model.summary())
progress = Progress(model)
# Flatten images to (n, w*h) vectors for the Dense input layer.
x_train = x_train.reshape(n, w*h)
# Unsupervised: the fit() target is the input image itself; the custom
# loss reconstructs an image from the predicted center and compares.
model.fit(x_train, x_train, epochs=1, verbose=1,
        batch_size=batchSize,
        callbacks=[progress])
#x_test, y_test = generate_data(w, h, n)
#x_test = x_test.reshape(n, w*h)
y_pred = model.predict(x_train)
# Evaluate predicted centers against the true ones (never seen in training).
mse = np.mean((y_pred-y_train)**2)
print("MSE:" + str(mse))
plt.imshow(x_train[0].reshape(w, h), cmap='gray')
plt.show()
# Render the circle implied by the first predicted center.
y = y_pred[0]
y_img = generate_single_circle_image(w, h, y[0], y[1])
show(y_img)
| [
"grbishop@wpi.edu"
] | grbishop@wpi.edu |
043fea3c1e80bfb5060a645f59cc59a63628d802 | 0388c546467c228da8d11e491ddb73f6c73efe64 | /calculator.py | a34c4b64e1dbbce3d0e61e5fb847ce6af68d9e02 | [] | no_license | Rantpel/Batch-Four | 916976d7c874e3092d66372db3216e6cc43eb1c5 | c5792d0975dd85b897b037cb6f69030199f0bf78 | refs/heads/master | 2020-03-28T13:23:01.270264 | 2018-10-14T17:53:18 | 2018-10-14T17:53:18 | 148,390,424 | 0 | 0 | null | 2018-09-11T22:57:04 | 2018-09-11T22:57:04 | null | UTF-8 | Python | false | false | 4,533 | py | #import maths library
import math

# ALL PURPOSE CALCULATOR
# Interactive script: the user picks a shape, then a quantity (area,
# perimeter, ...), enters the dimensions, and the result is printed.
print("WELCOME TO ALL PURPOSE CALCULATOR")
print("Select which shape you would want to calculate it value.")
s = int(input("1." + "Rectangle\n2." + "Circle\n3." + "Square\n4." + "Triangle\n5." + "Trapezium\nAns:"))

if s == 1:
    # --- Rectangle ---
    print("what do you want to calculate?")
    l = int(input("1." + "AREA\n2." + "PERIMETER\nAns:"))
    if l == 1:
        e = int(input("The length value:"))
        k = int(input("The breadth value:"))
        a = e * k
        print("The Area of such Rectangle:", a)
    elif l == 2:
        e = int(input("The length value:"))
        k = int(input("The breadth value:"))
        # Fixed: perimeter of a rectangle is 2*(length + breadth); the
        # original printed length + breadth.
        p = 2 * (e + k)
        print("The Perimeter of such Rectangle:", p)
    else:
        print("invalid syntax")
        print("Thank you for using all purpose calculator")
elif s == 2:
    # --- Circle ---
    print("What do you want to calculate?")
    j = int(input("1." + "AREA\n2." + "CIRCUMFERENCE\n3." + "RADIUS\n4." + "DIAMETER\nAns:"))
    if j == 1:
        r = int(input("The radius value:"))
        a = math.pi * r ** 2
        print("The Area of such Circle:", a)
    elif j == 2:
        r = int(input("The radius value:"))
        c = 2 * math.pi * r
        print("The Circumference of such Circle:", c)
    elif j == 3:
        i = int(input("1." + "from Area\n2." + "from Circumference\nChoice:"))
        if i == 1:
            a = int(input("The Area of the Circle:"))
            # r = sqrt(A / pi)
            r = math.sqrt(a / math.pi)
            print("Radius is:", r)
        elif i == 2:
            a = int(input("The Circumference of the Circle:"))
            # r = C / (2 * pi)
            r = a / (2 * math.pi)
            print("Radius is:", r)
        else:
            print("invalid syntax")
            print("Thank you for using all purpose calculator")
    elif j == 4:
        i = int(input("1." + "from Area\n2." + "from Circumference\nChoice:"))
        if i == 1:
            a = int(input("The Area of the Circle:"))
            # d = 2 * sqrt(A / pi)
            d = 2 * (math.sqrt(a / math.pi))
            print("Diameter is:", d)
        elif i == 2:
            a = int(input("The Circumference of the Circle:"))
            # d = C / pi, written as 2 * (C / (2 * pi))
            d = 2 * (a / (2 * math.pi))
            print("Diameter is:", d)
        else:
            print("invalid syntax")
            print("Thank you for using all purpose calculator")
    else:
        print("invalid syntax")
        print("Thank you for using all purpose calculator")
elif s == 3:
    # --- Square ---
    print("what do you want to calculate?")
    l = int(input("1." + "AREA\n2." + "PERIMETER\nAns:"))
    if l == 1:
        y = int(input("The length"))
        a = y ** 2
        print("The Area of a Square is:", a)
    elif l == 2:
        y = int(input("The length"))
        # Fixed: a square has four equal sides, so its perimeter is
        # 4*side; the original printed 2*side.
        p = 4 * y
        print("The Perimeter of a Square is:", p)
    else:
        print("invalid syntax")
        print("Thank you for using all purpose calculator")
elif s == 4:
    # --- Triangle ---
    print("what do you want to calculate?")
    l = int(input("1." + "AREA\n2." + "PERIMETER\nAns:"))
    if l == 1:
        b = int(input("input Base:"))
        h = int(input("input Height:"))
        a = 0.5 * b * h
        print("The Area of a Triangle is:", a)
    elif l == 2:
        b = int(input("input Base:"))
        h = int(input("input Height:"))
        c = int(input("input slantheight:"))
        # NOTE(review): a perimeter is the sum of the three SIDE lengths;
        # the "Height" prompt is only a side for a right triangle.
        u = b + h + c
        print("The Perimeter of a Triangle is:", u)
    else:
        print("invalid syntax")
        print("Thank you for using all purpose calculator")
elif s == 5:
    # --- Trapezium ---
    print("what do you want to calculate?")
    l = int(input("1." + "AREA\n2." + "PERIMETER\nAns:"))
    if l == 1:
        a = int(input("input top length:"))
        h = int(input("input Height:"))
        c = int(input("input Base lenth:"))
        # Fixed precedence bug: area is ((top + base) / 2) * height; the
        # original computed (base + top/2) * height.
        A = ((c + a) / 2) * h
        print("The Area of a Trapezium is:", A)
    elif l == 2:
        a = int(input("input top length:"))
        b = int(input("input slantHeight:"))
        c = int(input("input Base length:"))
        # Both slant sides are assumed equal, hence b appears twice.
        p = a + b + b + c
        print("The Perimeter of a Trapezium is:", p)
    else:
        print("invalid syntax")
        print("Thank you for using all purpose calculator")
else:
    print("invalid syntax")
    print("Thank you for using all purpose calculator")
print("Please kindly give us feedback on things to improve.")
| [
"noreply@github.com"
] | noreply@github.com |
3da4cb1219d09fd63aaf1bdabad64c368078efb2 | c3d03de240c65c1945f843c8fd0b4d86f3540f75 | /cls2det/model/builder.py | 7040f2ec28d24be252b9ad75650ba2546114b25b | [] | no_license | YuxinZou/cls2det | f9382bbab17f4eeaa62276f6536cb6b590084096 | 381c731ecc2504bf49a3842afda65c75170ba86b | refs/heads/master | 2022-10-17T16:28:17.975053 | 2020-06-11T07:39:06 | 2020-06-11T07:39:06 | 249,424,029 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import torch
from .model import resnet18
def model_builder(cfg):
if cfg.gpu is not None and torch.cuda.is_available():
print('=> use GPU: {}'.format(cfg.gpu))
device = torch.device(f'cuda:{cfg.gpu}')
else:
print('=> use CPU')
device = torch.device('cpu')
print('=> building pre-trained model resnet18')
model = resnet18(pretrained=True)
model = model.to(device)
model.eval()
return model, device
| [
"zouyux@outlook.com"
] | zouyux@outlook.com |
9a0a32664eb32200ecc56fb66c1444ceee1270a9 | 7dc65b6d2e857c807bd2f75e2586af5f8e933fe5 | /fixtures/vcenter_gateway.py | 1c031b4ea91e3d21c538dd73e06dfdc6d23c41d9 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | vkolli/contrail-test-perf | d6fdc20f4a2004066c5a6316afd915ecdc9366c2 | db04b8924a2c330baabe3059788b149d957a7d67 | refs/heads/master | 2021-01-18T15:36:18.120487 | 2017-03-30T19:19:30 | 2017-03-30T19:19:30 | 86,661,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,851 | py | from vcenter import *
from vnc_api.vnc_api import *
from lif_fixture import LogicalInterfaceFixture
from physical_device_fixture import PhysicalDeviceFixture
from pif_fixture import PhysicalInterfaceFixture
from port_fixture import PortFixture
from openstack import OpenstackAuth, OpenstackOrchestrator
from contrailapi import ContrailVncApi
class VcenterGatewayOrch(VcenterOrchestrator):
    """vCenter orchestrator that mirrors every VN/VM operation into the
    Contrail cluster through a ContrailPlugApi instance."""

    def __init__(self, inputs, host, port, user, pwd, dc_name, vnc, logger):
        super(VcenterGatewayOrch, self).__init__(inputs, host, port, user, pwd, dc_name, vnc, logger)
        self.plug_api = ContrailPlugApi(inputs,vnc,logger)

    def create_vn(self, name, subnets, **kwargs):
        # Create in vCenter first, then mirror the network into Contrail.
        vn_obj = super(VcenterGatewayOrch, self).create_vn(name, subnets, **kwargs)
        self.plug_api.create_network_in_contrail_cluster(name,subnets,**kwargs)
        return vn_obj

    def delete_vn(self, vn_obj, **kwargs):
        super(VcenterGatewayOrch, self).delete_vn(vn_obj, **kwargs)
        self.plug_api.delete_network_from_contrail_cluster(vn_obj.name,**kwargs)

    def create_vm(self, vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs):
        # NOTE(review): the super() call forwards the LITERALS count=1,
        # zone=None, node_name=None instead of this method's parameters,
        # so caller-supplied values are silently ignored -- confirm intent.
        vm_objs = super(VcenterGatewayOrch, self).create_vm(vm_name, image_name, vn_objs, count=1, zone=None, node_name=None, **kwargs)
        retry_vms = []
        retry_vms = vm_objs[:]
        # NOTE(review): retry_vms is mutated while being iterated, which
        # can skip elements; the pruned list is also never used afterwards.
        for vm in retry_vms:
            if self.get_vm_detail(vm):
                retry_vms.remove(vm)
            else:
                continue
        # For every VM NIC, create a VMI + logical interfaces in Contrail.
        for vm in vm_objs:
            for network in vm.networks:
                vlanId = network.config.defaultPortConfig.vlan.vlanId
                net_name = network.name
                if net_name in vm.macs:
                    mac = vm.macs[net_name]
                else:
                    mac = None
                self.plug_api.create_vmi_lif_and_attach_vmi_to_lif(vn_name=net_name,mac_address=mac,vlan=vlanId,vm=vm)
        for vm in vm_objs:
            vm.bring_up_interfaces(self,vm,intfs=['eth0'])
        # Refresh VM state, then register each VM object in the API server.
        for vm in vm_objs:
            vm.get()
            self.plug_api.create_vmobj_in_api_server(vm)
        return vm_objs

    def create_vn_vmi_for_stp_bpdu_to_be_flooded(self,**kwargs):
        """Create a dedicated 'stp_vn' network plus one VMI with a fixed
        MAC so STP BPDUs get flooded; idempotent via the MAC scan below."""
        self.plug_api.create_network_in_contrail_cluster(name='stp_vn',subnet=[{'cidr':'122.121.123.0/24'}],**kwargs)
        #The below code is needed for not to
        #create the stp vmi port if already exists
        #
        interfaces = self._vnc.virtual_machine_interfaces_list()
        for intf in interfaces['virtual-machine-interfaces']:
            uuid = intf['uuid']
            intf_obj = self._vnc.virtual_machine_interface_read(id=uuid)
            mac_obj = intf_obj.get_virtual_machine_interface_mac_addresses()
            macs = mac_obj.mac_address
            if macs:
                for mac in macs:
                    if mac == '02:02:03:04:05:06':
                        # The sentinel VMI already exists; nothing to do.
                        return
        self.plug_api.create_vmi_lif_and_attach_vmi_to_lif(vn_name='stp_vn',mac_address='02:02:03:04:05:06',vlan='0')

    def delete_vm(self, vm, **kwargs):
        # Delete in vCenter, then tear down the Contrail-side VMIs/LIFs
        # and the API-server VM object.
        super(VcenterGatewayOrch, self).delete_vm(vm, **kwargs)
        self.plug_api.delete_vmi_and_detach_vmi_to_lif(vm)
        self.plug_api.delete_vmobj_in_api_server(vm)
class ContrailPlugApi(object):
    """Helper that performs the Contrail (VNC API) side of each vCenter
    operation: networks, VMIs, logical interfaces and VM objects."""

    def __init__(self, inputs, vnc, logger):
        self._inputs = inputs
        self._vnc = vnc
        self.logger = logger
        self._proj_obj = self._get_project_object()
        self._ipam_obj = self._get_ipam_object()
        # One VcenterGateway wrapper per configured gateway device.
        self._gw = self._process_vcenter_gateway_info()
        self.vnc_h = ContrailVncApi(self._vnc, self.logger)

    def _get_project_object(self):
        return self._vnc.project_read(fq_name = self._inputs.project_fq_name)

    def _get_ipam_object(self):
        # The default IPAM is used for every network created here.
        return self._vnc.network_ipam_read(
            fq_name=['default-domain', 'default-project', 'default-network-ipam'])

    def create_network_in_contrail_cluster(self,name,subnet,**kwargs):
        # NOTE(review): _create_vn returns None, so self.vn_uuid is
        # always None -- confirm whether the uuid is needed anywhere.
        self.vn_uuid = self._create_vn(name,subnet)
        pass

    def delete_network_from_contrail_cluster(self,vn_name,**kwargs):
        self._delete_vn(vn_name)
        pass

    def delete_vmi_and_detach_vmi_to_lif(self,vm):
        # LIFs must go before the VMIs they reference.
        self.delete_lif(vm)
        self._delete_vmi(vm)

    def delete_lif(self,vm):
        self._delete_lif(vm)

    def create_vmobj_in_api_server(self,vm_obj):
        """Create the VM record in the API server and bind every port
        (VMI) of *vm_obj* to it."""
        vm_uuid = vm_obj.id
        try:
            self.vnc_h.create_virtual_machine(vm_uuid=vm_uuid)
        except Exception as e:
            self.logger.error("VM object create in api failed for vm id %s"%(vm_uuid))
            raise
        vm_api_obj = self._vnc.virtual_machine_read(id=vm_obj.id)
        for port in vm_obj.ports:
            port_uuid = port.uuid
            port_obj = self._vnc.virtual_machine_interface_read(id=port_uuid)
            port_obj.set_virtual_machine(vm_api_obj)
            self._vnc.virtual_machine_interface_update(port_obj)

    def delete_vmobj_in_api_server(self,vm_obj):
        # Best-effort delete: failure is logged but not re-raised.
        vm_uuid = vm_obj.id
        try:
            self.vnc_h.delete_virtual_machine(vm_uuid=vm_uuid)
        except Exception as e:
            self.logger.error("VM object delete in api failed for vm id %s"%(vm_uuid))

    def create_vmi_lif_and_attach_vmi_to_lif(self,vn_name,mac_address,vlan,vm=None):
        """Create one VMI on *vn_name* and a logical interface per
        gateway port, attaching the VMI to each LIF."""
        vn_obj = self._read_vn(vn_name)
        vn_id = vn_obj.uuid
        #create vmi
        port = self._create_vmi(vn_id=vn_id,mac_address=mac_address,
                      vm=vm )
        #for each vrouter gateway port , create lif
        for gw in self._gw:
            for phy_port in gw.ports:
                # LIF naming convention: "<physical-port>.<vlan>".
                lif_name = phy_port + '.' + str(vlan)
                pif_id = gw.get_port_uuid(phy_port)
                self._create_lif(lif_name,vlan,pif_id,vm=vm,vmi_ids = [port.uuid])

    def _create_vn(self, vn_name, vn_subnet):
        # Build the VN with one IPAM subnet per {'cidr': ...} entry.
        vn_obj = VirtualNetwork(vn_name, parent_obj=self._proj_obj)
        for pfx in vn_subnet:
            px = pfx['cidr'].split('/')[0]
            pfx_len = int(pfx['cidr'].split('/')[1])
            subnet_vnc = IpamSubnetType(subnet=SubnetType(px, pfx_len))
            vnsn_data = VnSubnetsType([subnet_vnc])
            vn_obj.add_network_ipam(self._ipam_obj, vnsn_data)
        try:
            self._vnc.virtual_network_create(vn_obj)
        except RefsExistError:
            # Network already exists; treat creation as idempotent.
            pass

    def _delete_vn(self, vn_name):
        vn_fq_name = VirtualNetwork(vn_name, self._proj_obj).get_fq_name()
        try:
            self._vnc.virtual_network_delete(fq_name=vn_fq_name)
        except cfgm_common.exceptions.NoIdError:
            # Already gone; deletion is idempotent.
            pass
    # end _delete_vn

    def _read_vn(self,vn_name):
        vn_fq_name = VirtualNetwork(vn_name, self._proj_obj).get_fq_name()
        try:
            vn_obj = self._vnc.virtual_network_read(fq_name=vn_fq_name)
        except cfgm_common.exceptions.NoIdError:
            # NOTE(review): on NoIdError vn_obj is never bound, so the
            # return below raises UnboundLocalError -- confirm intent.
            pass
        return vn_obj

    def _create_lif(self,name,vlan,pif_id,vmi_ids=[],vm=None):
        # NOTE(review): mutable default vmi_ids=[] is shared across
        # calls; safe only while every caller passes vmi_ids explicitly.
        lif_obj = LogicalInterfaceFixture(
            name, pif_id=pif_id, vlan_id=vlan,vmi_ids=vmi_ids)
        lif_obj.setUp()
        if vm:
            # Remember the LIF on the VM so _delete_lif can clean it up.
            vm.lifs.append(lif_obj)

    def _delete_lif(self,vm):
        for lif in vm.lifs:
            lif.cleanUp()

    def _create_vmi(self,vn_id,mac_address,
                fixed_ips=[],security_groups=[],
                extra_dhcp_opts=[],
                project_obj=None,vm=None):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; harmless while they are only read, never mutated.
        port = PortFixture(vn_id,
                 api_type='contrail',
                 mac_address=mac_address,
                 fixed_ips=fixed_ips,
                 extra_dhcp_opts=extra_dhcp_opts,
                 project_obj=self._proj_obj,
                 security_groups=security_groups)
        port.setUp()
        if vm:
            # Remember the port on the VM so _delete_vmi can clean it up.
            vm.ports.append(port)
        return port

    def _delete_vmi(self,vm):
        for port in vm.ports:
            port.cleanUp()

    def _process_vcenter_gateway_info(self):
        # Wrap each configured gateway dict in a VcenterGateway accessor.
        return [VcenterGateway(gw) for gw in self._inputs.vcenter_gateway]
class VcenterGateway:
    """Read-only accessor over one vcenter gateway configuration dict."""

    def __init__(self, gateway):
        # Raw configuration mapping for this gateway.
        self.gateway = gateway

    def _field(self, key):
        # Single place for all configuration lookups.
        return self.gateway[key]

    @property
    def name(self):
        """Gateway device name."""
        return self._field('name')

    @property
    def mgmt_ip(self):
        """Management IP address of the gateway."""
        return self._field('mgmt_ip')

    @property
    def ports(self):
        """List of physical port names on the gateway."""
        return self._field('ports')

    def get_port_uuid(self, port):
        """Register the device and *port* as fixtures and return the
        physical interface UUID."""
        device = PhysicalDeviceFixture(self.name, self.mgmt_ip)
        device.setUp()
        interface = PhysicalInterfaceFixture(port, device_id=device.phy_device.uuid)
        interface.setUp()
        return interface.uuid
| [
"root@5b3s45.contrail.juniper.net"
] | root@5b3s45.contrail.juniper.net |
41a07c3946ee192f7815792cd8694e18b33b2e57 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /tracformsplugin/tags/tracforms-0.3/0.11/tracforms/model.py | 417536d95b876557cb23396b175b7c80cc3f8843 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,608 | py | # -*- coding: utf-8 -*-
from trac.resource import Resource, ResourceNotFound
from api import FormSystem, _
__all__ = ['Form']
class Form(object):
"""Trac resource representation of a TracForms form."""
@staticmethod
def id_is_valid(num):
try:
return 0 < int(num) <= 1L << 31
except ValueError:
raise ResourceNotFound(
_("TracForm %(form_id)s does not exist.", form_id=num),
_("Invalid form number"))
def __init__(self, env, form_resource_or_parent_realm=None,
parent_id=None, subcontext=None, form_id=None, version=None):
self.env = env
# prepare db access
self.forms = FormSystem(env)
self.realm = 'form'
self.subcontext = subcontext
self.siblings = []
# DEVEL: support for handling form revisions not implemented yet
if isinstance(form_resource_or_parent_realm, Resource):
self.resource = form_resource_or_parent_realm
parent = self.resource.parent
if self.siblings == []:
self._get_siblings(parent.realm, parent.id)
else:
parent_realm = form_resource_or_parent_realm
if form_id not in [None, ''] and self.id_is_valid(form_id):
self.id = int(form_id)
else:
self.id = None
if self.id is not None and (parent_realm is None or \
parent_id is None or subcontext is None):
# get complete context, required as resource parent
ctxt = self.forms.get_tracform_meta(self.id)[1:4]
parent_realm = ctxt[0]
parent_id = ctxt[1]
self.subcontext = ctxt[2]
elif isinstance(parent_realm, basestring) and \
parent_id is not None and self.id is None:
# find form(s), if parent descriptors are available
if subcontext is not None:
ctxt = tuple([parent_realm, parent_id, subcontext])
self.id = self.forms.get_tracform_meta(ctxt)[0]
self._get_siblings(parent_realm, parent_id)
if isinstance(parent_realm, basestring) and \
parent_id is not None:
self.resource = Resource(parent_realm, parent_id
).child('form', self.id, version)
else:
raise ResourceNotFound(
_("""No data recorded for a TracForms form in
%(realm)s:%(parent_id)s
""", realm=parent_realm, parent_id=parent_id),
subcontext and _("with subcontext %(subcontext)s",
subcontext=subcontext) or '')
def _get_siblings(self, parent_realm, parent_id):
"""Add siblings list including self to form resource object."""
self.siblings = self.forms.get_tracform_ids(tuple([parent_realm,
parent_id]))
if len(self.siblings) == 1:
# form_id in single form situation
self.id = self.siblings[0][0]
self.subcontext = self.siblings[0][1]
@property
def has_data(self):
"""Return whether there is any form content stored."""
return (self.forms.get_tracform_fields(self.id) \
.firstrow is not None or \
self.forms.get_tracform_history(self.id) \
.firstrow is not None or \
self.forms.get_tracform_state(self.id) not in [None, '{}'])
| [
"hasienda@7322e99d-02ea-0310-aa39-e9a107903beb"
] | hasienda@7322e99d-02ea-0310-aa39-e9a107903beb |
8af41c09b124f2ec5b82fef8804ae4eefd794aa5 | 4759db9f7e74cec91edbb4c18c553b92913d1695 | /adafruit_atecc/adafruit_atecc_cert_util.py | 415c17ab0cb4833d4b867b6891196d9eb11ca90d | [
"MIT",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brentru/Adafruit_CircuitPython_ATECC | 9702e8e06123ab258fee39baf3462640401f9f28 | cceac6431ff28edcf410c53fc2db0c357533d774 | refs/heads/master | 2020-07-27T13:53:31.604065 | 2019-09-17T20:17:00 | 2019-09-17T20:17:00 | 209,113,921 | 1 | 0 | MIT | 2019-09-17T17:15:21 | 2019-09-17T17:15:21 | null | UTF-8 | Python | false | false | 6,488 | py | # Copyright (c) 2018 Arduino SA. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Brent Rubell for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_atecc_cert_util`
================================================================================
Certification Generation and Helper Utilities for the Adafruit_ATECC Module.
* Author(s): Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from adafruit_binascii import b2a_base64
import adafruit_atecc.adafruit_atecc_asn1 as asn1
class CSR:
    """Certificate Signing Request Builder.

    :param adafruit_atecc atecc: ATECC module.
    :param slot_num: ATECC module slot (from 0 to 4).
    :param bool private_key: Generate a new private key in selected slot?
    :param str country: 2-letter country code.
    :param str state_prov: State or Province name,
    :param str city: City name.
    :param str org: Organization name.
    :param str org_unit: Organizational unit name.
    """
    # pylint: disable=too-many-arguments, too-many-instance-attributes
    def __init__(self, atecc, slot_num, private_key, country, state_prov,
                 city, org, org_unit):
        self._atecc = atecc
        self.private_key = private_key
        self._slot = slot_num
        self._country = country
        self._state_province = state_prov
        self._locality = city
        self._org = org
        self._org_unit = org_unit
        # The chip's serial number doubles as the certificate Common Name.
        self._common = self._atecc.serial_number
        self._version_len = 3
        self._cert = None
        self._key = None

    def generate_csr(self):
        """Generates and returns a certificate signing request."""
        self._csr_begin()
        csr = self._csr_end()
        return csr

    def _csr_begin(self):
        """Initializes CSR generation. """
        assert 0 <= self._slot <= 4, "Provided slot must be between 0 and 4."
        # Create a new key
        self._key = bytearray(64)
        # NOTE(review): both branches call gen_key with identical
        # arguments; the early return is redundant -- confirm intent.
        if self.private_key:
            self._atecc.gen_key(self._key, self._slot, self.private_key)
            return
        self._atecc.gen_key(self._key, self._slot, self.private_key)

    def _csr_end(self):
        """Generates and returns
        a certificate signing request as a base64 string."""
        # Pre-compute the DER lengths so the ASN.1 headers can be written
        # before their contents.
        len_issuer_subject = asn1.issuer_or_subject_length(self._country, self._state_province,
                                                           self._locality, self._org,
                                                           self._org_unit, self._common)
        len_sub_header = asn1.get_sequence_header_length(len_issuer_subject)
        len_csr_info = self._version_len + len_issuer_subject
        # 91 bytes covers the encoded EC public key; +2 for the terminator.
        len_csr_info += len_sub_header + 91 + 2
        len_csr_info_header = asn1.get_sequence_header_length(len_csr_info)
        # CSR Info Packet
        csr_info = bytearray()
        # Append CSR Info --> [0:2]
        asn1.get_sequence_header(len_csr_info, csr_info)
        # Append Version --> [3:5]
        asn1.get_version(csr_info)
        # Append Subject --> [6:7]
        asn1.get_sequence_header(len_issuer_subject, csr_info)
        # Append Issuer or Subject
        asn1.get_issuer_or_subject(csr_info, self._country, self._state_province,
                                   self._locality, self._org, self._org_unit, self._common)
        # Append Public Key
        asn1.get_public_key(csr_info, self._key)
        # Terminator
        csr_info += b"\xa0\x00"
        # Init. SHA-256 Calculation
        # Hash the CSR info in 64-byte chunks on the ATECC; the final
        # partial chunk yields the digest.
        csr_info_sha_256 = bytearray(64)
        self._atecc.sha_start()
        for i in range(0, len_csr_info + len_csr_info_header, 64):
            chunk_len = (len_csr_info_header + len_csr_info) - i
            if chunk_len > 64:
                chunk_len = 64
            if chunk_len == 64:
                self._atecc.sha_update(csr_info[i:i+64])
            else:
                csr_info_sha_256 = self._atecc.sha_digest(csr_info[i:])
        # Sign the SHA256 Digest
        signature = bytearray(64)
        signature = self._atecc.ecdsa_sign(self._slot, csr_info_sha_256)
        # Calculations for signature and csr length
        len_signature = asn1.get_signature_length(signature)
        len_csr = len_csr_info_header + len_csr_info + len_signature
        # NOTE(review): the next call's return value is discarded --
        # confirm whether it was meant to be used.
        asn1.get_sequence_header_length(len_csr)
        # append signature to csr
        csr = bytearray()
        asn1.get_sequence_header(len_csr, csr)
        # append csr_info
        csr += csr_info
        asn1.get_signature(signature, csr)
        # encode and return
        csr = b2a_base64(csr)
        return csr
| [
"robots199@me.com"
] | robots199@me.com |
a7174edb2714ff5d5cc971dcc12be8543722ecbf | 8a5a98e1bbbc2a6f2b9d29c0124c6d5b5b4ce9af | /functionalprogramming/employemap.py | 2040d6ff00dedcbc5b6f9ad66e2a02759820a564 | [] | no_license | melbinmathew425/universe | acd9c163c66eeaac29db0e670735d283bb358ad8 | b4675ce08c7400f68b9b83592bf945dca4411833 | refs/heads/master | 2023-04-12T18:50:19.989424 | 2021-04-19T10:05:53 | 2021-04-19T10:05:53 | 358,550,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | class Emplo:
    def __init__(self,eid,ename,desig,salary):
        """Store the employee's id, name, designation and salary."""
        self.eid=eid
        self.ename=ename
        self.desig=desig
        self.salary=salary
    def print_emp(self):
        """Print only the employee's name."""
        print(self.ename)
# Build a small sample roster of employees.
b1=Emplo(100,"anu","developer",23000)
b2=Emplo(101,"manu","R&D",25000)
b3=Emplo(102,"maya","manager",21000)
b4=Emplo(103,"jittu","sw tester",28000)
employee=[]
employee.append(b1)
employee.append(b2)
employee.append(b3)
employee.append(b4)
# sal=[] CONVENTIONAL APPROACH
# for emp in employee:
#     sal.append(emp.salary)
# print(sal)
# Functional approach: map each employee to a salary, then take the max.
salary=list(map(lambda emp:emp.salary,employee))
hs=max(salary)
print(salary)
print(hs) | [
"kelans995@gmail.com"
] | kelans995@gmail.com |
98897de6751e1925e957dc87ff9bf9135757db2d | 825aca806721b8e639e5d704e43df1f1f74aebd8 | /venv/bin/autopep8 | ed085d54d9bdb0fba08c765a9eaec8a800527ae2 | [] | no_license | eahnivy8/Sparta | 79c5797a2043c7fc82ff9827b6dd37a29ff4e354 | 6d0e99bbeed7088e480dd412cc211d60127964b6 | refs/heads/master | 2022-11-07T07:19:28.858937 | 2020-06-26T04:39:00 | 2020-06-26T04:39:00 | 264,405,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | #!/Users/edwardahn/Desktop/Sparta/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.2','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" launcher suffix from argv[0],
    # then dispatch to the autopep8 console-script entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('autopep8==1.5.2', 'console_scripts', 'autopep8')()
    )
| [
"eahnivy8@gmail.com"
] | eahnivy8@gmail.com | |
4298a183a60a96774b1216ec74482253f0849202 | 11cf2a585243a640d462ad6b075fe99e90e38897 | /.lint.py | 26081d830a1f5278b2eb263ae79a2149ad1dfe42 | [] | no_license | mirjak/draft-deconinck-multipath-quic | 1d8c4bd9498ef2d049ca4624ad28a9b322865c91 | 4975ac93b25981c3e75b00d3175dd0f8b28d0b36 | refs/heads/master | 2023-01-11T18:41:19.407161 | 2020-11-02T19:46:24 | 2020-11-02T19:46:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | #!/usr/bin/env python3
import sys
import argparse
import re

# Lint markdown I-D drafts: flag lines longer than the prose limit, or the
# (shorter) figure limit inside fenced blocks; skip front matter and tables.
parser = argparse.ArgumentParser(description='Lint markdown drafts.')
parser.add_argument('files', metavar='file', nargs='+', help='Files to lint')
# Fixed: type=int so "-l 100" parses to an int -- without it argparse
# yields a str and the `length > limit` comparison below raises TypeError.
parser.add_argument('-l', dest='maxLineLength', type=int, default=80)
parser.add_argument('-f', dest='maxFigureLineLength', type=int, default=65)
args = parser.parse_args()

foundError = False

for inputfile in args.files:
    insideFigure = False
    beforeAbstract = True
    with open(inputfile, mode='rt', newline=None, encoding='utf-8') as draft:
        # Patterns as raw strings (avoids invalid-escape warnings),
        # hoisted out of the per-line loop.
        abstract = re.compile(r'^--- abstract')
        table = re.compile(r'^\s*(?:\||{:)')
        figure = re.compile(r'^[~`]{3,}')
        for linenumber, line in enumerate(draft, start=1):
            line = line.rstrip('\r\n')
            # Skip everything before the abstract marker.
            if beforeAbstract:
                if abstract.match(line):
                    beforeAbstract = False
                continue
            # Skip tables.
            if table.match(line):
                continue
            # Fence lines toggle figure state and are themselves exempt.
            if figure.match(line):
                insideFigure = not insideFigure
                continue
            # Check length against the applicable limit.
            length = len(line)
            limit = args.maxFigureLineLength if insideFigure else args.maxLineLength
            if length > limit:
                foundError = True
                sys.stderr.write("{0}: Line is {1} characters; limit is {2}\n".format(
                    linenumber, length, limit))
                sys.stderr.write("{0}\n".format(line))

sys.exit(1 if foundError else 0)
| [
"quentin.deconinck@uclouvain.be"
] | quentin.deconinck@uclouvain.be |
65b5db19c0c8e94f3a380bfd841c61ac94e5b269 | 8fb898f222110b970ae421b1cf3a1eaa4a674fb4 | /setup.py | 53b72022cc19e408570886c295f4455447cb6aea | [] | no_license | wharton/django-data-tables-tags | 6d6e2abd7be98e8db87aa61571e51d0867ccaa7b | 329911f6ca71c802bda6387eda35c0e11342e787 | refs/heads/main | 2022-06-12T05:48:07.661693 | 2022-05-27T19:23:44 | 2022-05-27T19:23:44 | 242,775,077 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | from setuptools import setup, find_packages
# The PyPI long description is taken verbatim from the README.
with open("README.md") as f:
    long_description = f.read()


setup(
    name="django-data-tables-tags",
    description="Django template tags for jQuery DataTables.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Timothy Allen",
    author_email="tallen@wharton.upenn.edu",
    url="https://github.com/wharton/django-data-tables-tags",
    include_package_data=True,
    packages=find_packages(),
    zip_safe=False,
    install_requires=["Django>=2"],
    # Version is derived from git tags via setuptools_scm.
    setup_requires=["setuptools_scm"],
    use_scm_version=True,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3 :: Only",
        "Framework :: Django",
        "Framework :: Django :: 2.0",
        "Framework :: Django :: 2.1",
        "Framework :: Django :: 2.2",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
)
| [
"tallen@wharton.upenn.edu"
] | tallen@wharton.upenn.edu |
ffb27003b54e5fba375bc3c719e64f1319dcbffe | 74a19cf9b487c2d88368b7c9af4589d1887a03b2 | /os_tools.py | 549a5991bdbf45ad38409b27d56e7006d7161eba | [] | no_license | foreswearer/gist_music | ced21d78cf9d1402657a7bde53aeb969f07539af | 78905bab9fd2ec32ddada27de32d8f77ad52b65b | refs/heads/master | 2023-04-11T20:44:48.972034 | 2022-04-24T17:09:24 | 2022-04-24T17:09:24 | 469,429,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import os
import shutil
from datetime import datetime
__level = 4
def reset_dir(dir_to_be_reset):
    """Recreate *dir_to_be_reset* as an empty directory.

    Existing contents are removed; a missing directory is not an error,
    so the call is idempotent.
    """
    # ignore_errors=True covers the "directory does not exist" case that
    # the previous try/except OSError: pass swallowed.
    shutil.rmtree(dir_to_be_reset, ignore_errors=True)
    try:
        os.mkdir(dir_to_be_reset)
    except OSError:
        # Directory already exists (e.g. another process recreated it).
        pass
def log(message, level):
    """Print *message* with an HH:MM:SS prefix when *level* is within the
    module-wide ``__level`` threshold; otherwise do nothing."""
    if level > __level:
        return
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f'{stamp}|> {message}')
| [
"ramiro.rego@gmail.com"
] | ramiro.rego@gmail.com |
539fae5682994014bc3638e138e74f8815231a5b | 5659d0fd423497bbf4234ff3e1ee7732e802fc4f | /test.py | 23f0e66c886986bb379a9477666db04655b4a41a | [] | no_license | jefffall/python_test_grocery_list | 1748facb741599b610364228669d6201ab6d1ab2 | b6da3b2c296a505607301b156f1082f5f44c3bff | refs/heads/main | 2022-12-20T23:07:04.585389 | 2020-10-01T19:41:10 | 2020-10-01T19:41:10 | 300,402,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | prices = {'apple': '4.3', 'banana': 4.50}
# Quantities purchased, keyed by the same fruit names used in `prices`.
my_purchase = {
    'apple': 1,
    'banana': 6}
# Total owed: price times quantity, accumulated over the basket.
grocery_bill = 0.0
for fruit in my_purchase:
    grocery_bill += float(prices[fruit]) * float(my_purchase[fruit])
print ('I owe the grocer $%.2f' % grocery_bill) | [
"noreply@github.com"
] | noreply@github.com |
04ab58ced188affbe5375710d236b62a2a8728a9 | fe356f30f79dd5815ff9b3373eb77d8ffc7076fb | /python/Class.py | 8a021bc1ca29dd8266ea851f6e6aaeedbffc1189 | [] | no_license | paolosabatini/ISTAT_HealthAnalysis | a3b6e9b0a135b0c650bcddfbdf3acccf42f7d3b7 | 3063d3e784f8cc87e1f0ad99be12b0b161372ad5 | refs/heads/master | 2020-03-27T14:34:24.408296 | 2019-04-28T11:58:57 | 2019-04-28T11:58:57 | 146,670,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import os
from Converter import *
class Interview:
    """One survey interview record decoded from a raw data row.

    Assumes *dic* is a mapping with keys 'PID', 'RIP' and 'SK1'
    (survey codebook fields) -- TODO confirm against the data source.
    """
    def __init__(self, dic):
        # Respondent identifier, kept verbatim.
        self.id = dic['PID']
        # Geographic area code decoded via Converter.geoConverter.
        self.geo = geoConverter(dic['RIP'])
        # Smoking-status flag decoded via Converter.smokeConverter.
        self.smokeflag = smokeConverter(dic['SK1'])
| [
"paolosbtn@gmail.com"
] | paolosbtn@gmail.com |
88fde4953ea93f45918c4891940b3b494d26ae2f | 7623d4ca5cacb259a1b2e7a98b1e8a3011592348 | /SICP/examples/ex2_83.py | b372d8db084f7f17d5cb1e2e2f63db57d0db0e8f | [] | no_license | nextdesusu/Learn-Python | 3b875ab5093844fe64cc13e717a3637bdfe62a9a | 3212059408eec27ee2ed359ac9d691b3d061372f | refs/heads/master | 2022-01-29T07:39:11.915177 | 2019-07-21T14:18:10 | 2019-07-21T14:18:10 | 198,063,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | def gcd(a, b):
    # Modulo-based Euclidean reduction: repeatedly replace the larger
    # value with (larger % smaller) until one of them reaches 0.
    while a != 0 and b != 0:
        if a > b:
            a = a % b
        else:
            b = b % a
    # Exactly one of a/b is 0 here (both only if the inputs were 0, 0),
    # so a + b is the surviving value, i.e. the gcd.
    # NOTE(review): assumes non-negative inputs; negative values can
    # loop forever with Python's modulo semantics -- confirm callers.
    return a + b
#print(gcd(50, 130))
class Complex:
    """Bare-bones complex number holding a real and an imaginary part."""

    def __init__(self, real, imag=0):
        # The imaginary part defaults to 0 so a plain number can be
        # promoted with Complex(value).
        self.real = real
        self.imag = imag

    def __str__(self):
        # Render as "<real> + <imag>i", e.g. Complex(1, 2) -> "1 + 2i".
        return f'{self.real} + {self.imag}i'
class Rational:
    """Exact rational number n/m with integer numerator and denominator."""

    def __init__(self, n, m):
        self.n = n
        if m == 0:
            # The original `raise 1 / 0` only worked because evaluating
            # 1/0 itself raised; raise the same exception type explicitly.
            raise ZeroDivisionError('denominator of a Rational must be nonzero')
        self.m = m

    @property
    def equate(self):
        """Value of the fraction as a float (n divided by m)."""
        return self.n / self.m

    def __add__(self, other):
        """Exact rational addition, reduced to lowest terms.

        Bug fix: the original returned the mediant (n1+n2)/(m1+m2),
        which is not addition.  The correct sum is
        (n1*m2 + n2*m1) / (m1*m2); integer floor-division by the gcd
        keeps both fields ints (the original's `/` produced floats).
        """
        if not isinstance(other, Rational):
            return NotImplemented
        from math import gcd  # local import: this module has no import section
        n = self.n * other.m + other.n * self.m
        m = self.m * other.m
        g = gcd(n, m)
        return Rational(n // g, m // g)

    def __str__(self):
        return '{0} / {1}'.format(self.n, self.m)
def raise_(num):
    """Promote *num* one level up the numeric tower (SICP ex. 2.83):
    int -> Rational, Rational -> float (its decimal value),
    float -> Complex.  Anything else -- including Complex, the top of
    the tower -- falls through and returns None.
    """
    if isinstance(num, int):
        return Rational(num, 1)
    if isinstance(num, Rational):
        return float(num.equate)
    if isinstance(num, float):
        return Complex(num, 0)
# Demo: repeatedly raise 1 through the tower
# (int -> Rational -> float -> Complex), printing each stage.
a = 1
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a) | [
"noreply@github.com"
] | noreply@github.com |
ecadda233d55e5a381cea2a473aabeb40e553cf4 | f32e9b464a8c9fb7f5238935cfb5f83e840269e6 | /chat.py | 9bba623185a4235e003e9897cc735374256095c4 | [] | no_license | DavidArmendariz/python-chatbot | c192fc5f310d7c069c2a58b165ff8d90a1ceff2b | c7df66d4e0ae64c79ab75cc5cb58690efa677c23 | refs/heads/master | 2022-12-18T18:38:38.375681 | 2020-09-28T19:10:11 | 2020-09-28T19:10:11 | 258,566,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from app import app, db
from app.models import User, Message, Chatroom
# Registered via `app.shell_context_processor`: the returned mapping is
# pre-imported into interactive `flask shell` sessions for convenience.
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Message': Message, 'Chatroom': Chatroom} | [
"darmendariz1998@outlook.com"
] | darmendariz1998@outlook.com |
67b2476bf0da69a387ba3b0eac38cf08671c1edc | c539a8c5ea536f96eb83139f4988186dee796232 | /venv/Scripts/pasteurize-script.py | 5f7ae5b191cec1993e357b5bd347a3dc9070d062 | [] | no_license | risabhmishra/RisChat | 10afd0414ab82013607ce5f378ba9a7f8c337b1a | 4a7e773fe0fb0b2d140ec090f542cac85d11dc2f | refs/heads/master | 2020-03-21T20:30:45.039376 | 2018-06-28T11:59:35 | 2018-06-28T11:59:35 | 139,011,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #!"C:\Users\Risabh Mishra\PycharmProjects\beeware\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','pasteurize'
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalise argv[0]: drop a trailing "-script.py(w)" or ".exe"
    # (setuptools wrapper artefacts) so help/usage shows "pasteurize".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # Delegate to the console-script entry point declared by
        # future==0.16.0 and exit with its return code.
        load_entry_point('future==0.16.0', 'console_scripts', 'pasteurize')()
    )
| [
"rishabh.mshr@yahoo.in"
] | rishabh.mshr@yahoo.in |
a612fd302a3b107dc189f3aaa42b33aacb217e12 | f6c9907b2eaa7c4d9b48cf9d8605abe3181fff3b | /lovelips/migrations/0008_auto_20150624_0628.py | 76bd87c717236377a64b2c336ca9e0fdb2c28b28 | [] | no_license | sukmadyu/lovelips | c109a43577e6528d97cd3cfa61dc4eb6397f7403 | d21e9b023a8ad8444cf7071dab10bca428d70ed3 | refs/heads/master | 2021-01-17T07:02:43.279803 | 2015-10-06T10:25:52 | 2015-10-06T10:25:52 | 37,252,838 | 0 | 0 | null | 2016-05-02T05:02:49 | 2015-06-11T09:48:13 | CSS | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: make Alternatif.kri optional (NULL and blank allowed)."""
    dependencies = [
        ('lovelips', '0007_auto_20150624_0627'),
    ]
    operations = [
        migrations.AlterField(
            model_name='alternatif',
            name='kri',
            field=models.TextField(null=True, blank=True),
            preserve_default=True,
        ),
    ]
| [
"sukmadyu@gmail.com"
] | sukmadyu@gmail.com |
32bc36980bd85af045910d5303f1b1c037b8938f | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/RSG/RSGfragments/RSGravitonToGammaGamma_kMpl01_M_5750_TuneCP5_13TeV_pythia8_cfi.py | dbf0343665487d8f89199e7c5e5a6aaec7a57103 | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,324 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8aMCatNLOSettings_cfi import *
# Pythia8 fragment: RS graviton (PDG id 5100039) with m0 = 5750 GeV,
# decays restricted to id 22 (photons), at sqrt(s) = 13 TeV.
# k/Mpl = 0.1 per the file name -- kappaMG encodes the coupling.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),
    crossSection = cms.untracked.double(1.095e-3),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        pythia8aMCatNLOSettingsBlock,
        processParameters = cms.vstring(
            'ExtraDimensionsG*:all = on',
            'ExtraDimensionsG*:kappaMG = 0.541643794389',
            '5100039:m0 = 5750.0',
            '5100039:onMode = off',
            '5100039:onIfAny = 22',
            ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CP5Settings',
                                    'pythia8aMCatNLOSettings',
                                    'processParameters',
                                    )
    )
)
# Standard hook name picked up by the central production configuration.
ProductionFilterSequence = cms.Sequence(generator)
| [
"uzzie.perez@cern.ch"
] | uzzie.perez@cern.ch |
1fd6d943b7777ef60139bbb6ed83f5e7cd902b6a | fa6374867b4dad8942a4948de66e6308022e1f77 | /module01/01_hello.py | 900faf99f6a64d0541c4826083fed0bde23a17e7 | [] | no_license | jzapanta-snhu/it-140-zapanta-examples | b6410b174c96c25870b08dbfb93ece985024ee94 | 8084fefa165f8c95e80d86097dcc5b14905c97e0 | refs/heads/master | 2021-07-17T10:46:57.769190 | 2020-06-27T14:02:06 | 2020-06-27T14:02:06 | 185,298,725 | 0 | 2 | null | 2020-06-27T14:02:07 | 2019-05-07T01:36:56 | Python | UTF-8 | Python | false | false | 284 | py | # NAME: Javier E. Zapanta (j.zapanta@snhu.edu)
# DATE: 2019 May 06
# COURSE: IT-140
# PROGRAM: Hello World
#
# PURPOSE: This program will print "Hello World" to the terminal.
# RUNTIME: Python 2+
# Print the greeting; print() appends the trailing newline itself.
print("Hello World")
| [
"j.zapanta@snhu.edu"
] | j.zapanta@snhu.edu |
c0f92a66d2fe479fac5c31e96b84fa83550c245b | 9a543f49b15d43aa33fb663ae9ff917515cba022 | /posts/migrations/0006_auto_20170702_1912.py | 6497eaca1bab94d68b46dab4e03af0e8a9fa0098 | [] | no_license | zhl146/PortfolioBackend | 7f356e5615621c0e22c93e569f6f329522bb36cd | ab150c75dab0237afde7bca21357a61354284be0 | refs/heads/master | 2020-12-03T00:06:39.456963 | 2017-07-12T02:49:17 | 2017-07-12T02:49:17 | 95,988,886 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-02 23:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Content.abstract nullable/blank (max 1024 chars)."""
    dependencies = [
        ('posts', '0005_auto_20170702_1256'),
    ]
    operations = [
        migrations.AlterField(
            model_name='content',
            name='abstract',
            field=models.TextField(blank=True, default=None, max_length=1024, null=True),
        ),
    ]
| [
"jknoxiii@gmail.com"
] | jknoxiii@gmail.com |
b12b10856a4ed41d9955fd88843bf4578be7a5d2 | 49391296cb1d28db443f518fcfd14e0e76afd3c8 | /main/migrations/0003_request.py | 53e9282b13d9ae091b62f436ed85160c8bf6a311 | [] | no_license | Ppolyak/clinic | 72e8a59f3a1841880b94469a29dad2a73d9cc672 | 3204dc575166788d3e6ab066cb0a882c7a1d11ff | refs/heads/master | 2022-07-28T11:29:12.026257 | 2020-05-16T14:48:21 | 2020-05-16T14:48:21 | 264,459,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # Generated by Django 2.2 on 2020-05-05 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the Request model linking a Client to a Doctor.

    `status` is a Waiting/Declined/Approved choice defaulting to Waiting;
    both foreign keys use SET_NULL so a request outlives account deletion.
    """
    dependencies = [
        ('accounts', '0007_auto_20190425_0242'),
        ('main', '0002_auto_20190424_0140'),
    ]
    operations = [
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(blank=True, choices=[('Waiting', 'Waiting'), ('Declined', 'Declined'), ('Approved', 'Approved')], default='Waiting', max_length=15, null=True)),
                ('client_from', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.Client')),
                ('doctor_to', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.Doctor')),
            ],
        ),
    ]
| [
"pasha_polyakov8@mail.ru"
] | pasha_polyakov8@mail.ru |
4350a9276b09f70ffaedbc01c41f946cdbcf5634 | 5c12dea736f83d6198c9e05df01087e2e9992c03 | /tweet_collector/save_tweets.py | c2abb7a0914a190089419ec840a90b7ab82ffd31 | [
"MIT"
] | permissive | fe-bern/Twitter-Slackbot | 84f199a48509b91240c7eeac8b915d2e30312cec | 18f93472e8fb67f9d7b7de8cd595265bb608fad1 | refs/heads/main | 2023-02-18T10:58:04.772024 | 2021-01-06T15:55:24 | 2021-01-06T15:55:24 | 327,355,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | #not neeeded anymore beacause get_tweets.py is directly connecting to MongoDB
import pandas as pd
import pymongo

df = pd.read_csv('tweets.csv')

# Drop columns whose names contain '.' or '#' (MongoDB field names must
# not contain dots).  Iterate over a *snapshot* of the column names:
# deleting from `df` while iterating it directly mutates the sequence
# being iterated and can skip columns or raise.
for c in list(df):
    if '.' in c or '#' in c:
        del df[c]

# One dict per row -- the shape insert_many expects.
r = df.to_dict(orient='records')

# Connect to the local MongoDB instance and use the `tweets` database.
client = pymongo.MongoClient()
db = client.tweets

# Bulk-insert all rows into the `tweets_data` collection.
db.tweets_data.insert_many(r)

# Read back a sample.  NOTE(review): the filter values look like
# leftovers from a Pokemon tutorial -- confirm the intended query.
for x in db.tweets_data.find({'Name': {'$in': ['Pikachu', 'Bulbasaur']}
                             }):
    print(x)
| [
"noreply@github.com"
] | noreply@github.com |
5d138e5360cc2f70a545930243755257cfe16faa | 2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6 | /baekjoon/accepted/16235 나무 재테크.py | 8b87c7f358caec812d47334277bfd5184b39dafd | [] | no_license | grasshopperTrainer/coding_practice | 530e9912b10952c866d35d69f12c99b96959a22d | d1e5e6d6fa3f71f1a0105940fff1785068aec8b0 | refs/heads/master | 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | from sys import stdin
# import heapq
from collections import deque
def solution(N, M, K, supplement, trees):
    """Simulate K years of the BOJ 16235 'tree investment' problem.

    N          -- side length of the N x N grid
    K          -- number of years to simulate
    supplement -- N x N nutrient amounts added each winter
    trees      -- (x, y, age) triples with 1-based coordinates

    Returns the number of trees alive after the K-th year.
    Each cell's deque keeps ages in ascending order, so the youngest
    trees feed first without any sorting: new trees (age 1) are always
    pushed onto the left end.
    """
    # every cell starts with 5 nutrients
    ground = [[5]*N for _ in range(N)]
    forest = [[deque() for _ in range(N)] for _ in range(N)]
    for x, y, z in trees:
        forest[x-1][y-1].append(z)
    for _ in range(K):
        num_new_trees = [[0]*N for _ in range(N)]
        num_trees = 0
        # spring/summer: feed and age the trees, youngest first
        for x in range(N):
            for y in range(N):
                # forest[x][y].sort(reverse=True)
                tree_aged = deque()
                while forest[x][y] and forest[x][y][0] <= ground[x][y]:
                    tree = forest[x][y].popleft()
                    tree_aged.append(tree+1)
                    ground[x][y] -= tree
                    num_trees += 1
                    # autumn: a tree whose new age is a multiple of 5
                    # seeds the 8 neighbouring cells
                    if (tree + 1) % 5 == 0:
                        for dx in range(-1, 2):
                            for dy in range(-1, 2):
                                if (dx, dy) == (0, 0):
                                    continue
                                nx, ny = x + dx, y + dy
                                if 0 <= nx < N and 0 <= ny < N:
                                    num_new_trees[nx][ny] += 1
                                    num_trees += 1
                # trees left in the deque starved: each dead tree of age
                # `age` turns into age // 2 nutrients
                ground[x][y] += sum(age >> 1 for age in forest[x][y])
                # keep only the survivors (now one year older)
                forest[x][y] = tree_aged
                # winter: add the fertiliser for this cell
                ground[x][y] += supplement[x][y]
        # plant the seeds afterwards; age-1 trees go on the left end so
        # each deque stays sorted without an explicit sort
        for x in range(N):
            for y in range(N):
                for _ in range(num_new_trees[x][y]):
                    forest[x][y].appendleft(1)
    return num_trees
# Read the problem input: grid size, initial tree count and year count,
# then the N fertiliser rows and the M initial trees.
N, M, K = map(int, stdin.readline().strip().split(' '))
supplement = [tuple(map(int, stdin.readline().strip().split(' '))) for _ in range(N)]
tree = [tuple(map(int, stdin.readline().strip().split(' '))) for _ in range(M)]
print(solution(N, M, K, supplement, tree)) | [
"46477711+grasshopperTrainer@users.noreply.github.com"
] | 46477711+grasshopperTrainer@users.noreply.github.com |
61ff5c8334ef68165decb5ebf79ac24e635cf43d | 8583e750cd8f9661c1ee02e41a9b9c543d82ea9a | /interface/telaLista.py | 7a9d549f8a3ae39f945a5a57ffe9200d16de47ab | [] | no_license | virgilio09/Controle-de-estoque | 4f1174b5bef3fa86ee0f7b24e65814deafdd73dd | cdf2179ed08115b06e4ad19bd2bee1f1c5bf29ba | refs/heads/main | 2023-02-25T23:24:28.003027 | 2021-02-01T13:49:02 | 2021-02-01T13:49:02 | 317,719,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,752 | py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
class TelaListProd(object):
    """UI for the product-listing window (4-column table plus search,
    delete and back controls).

    Appears to be Qt Designer / pyuic output (setupUi/retranslateUi
    pattern) -- regenerate from the .ui file rather than hand-editing,
    if that file still exists (TODO confirm).
    """
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 480)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(30, 60, 581, 361))
        self.tableWidget.setGridStyle(QtCore.Qt.NoPen)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(4)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(3, item)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(40, 20, 101, 17))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(110, 20, 101, 25))
        self.lineEdit.setObjectName("lineEdit")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(220, 20, 61, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(310, 20, 201, 17))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(500, 20, 51, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 430, 51, 21))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setObjectName("pushButton_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Localisable (pt-BR) texts for all widgets and table headers.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Código"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Nome"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "Valor"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "Quantidade"))
        self.label.setText(_translate("MainWindow", "Pesquisar:"))
        self.pushButton.setText(_translate("MainWindow", "Buscar"))
        self.label_2.setText(_translate("MainWindow", "Selecione um item para excluir"))
        self.pushButton_2.setText(_translate("MainWindow", "Excluir"))
        self.pushButton_3.setText(_translate("MainWindow", "Voltar"))
class TelaListaCli(object):
    """UI for the client-listing window (2-column Nome/CPF table plus
    search, delete and back controls).

    Appears to be Qt Designer / pyuic output -- regenerate from the .ui
    file rather than hand-editing, if that file still exists.
    """
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 480)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(290, 40, 331, 411))
        self.tableWidget.setGridStyle(QtCore.Qt.NoPen)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 40, 101, 17))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(10, 70, 101, 25))
        self.lineEdit.setObjectName("lineEdit")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(130, 70, 61, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 210, 201, 17))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(140, 240, 51, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 430, 51, 21))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setObjectName("pushButton_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Localisable (pt-BR) texts for all widgets and table headers.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Nome"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "CPF"))
        self.label.setText(_translate("MainWindow", "Pesquisar:"))
        self.pushButton.setText(_translate("MainWindow", "Buscar"))
        self.label_2.setText(_translate("MainWindow", "Selecione um item para excluir"))
        self.pushButton_2.setText(_translate("MainWindow", "Excluir"))
        self.pushButton_3.setText(_translate("MainWindow", "Voltar"))
class TelaListaFunc(object):
    """UI for the employee-listing window (3-column Nome/CPF/Salario
    table plus search, delete and back controls).

    Appears to be Qt Designer / pyuic output -- regenerate from the .ui
    file rather than hand-editing, if that file still exists.
    """
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 480)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(270, 40, 351, 411))
        self.tableWidget.setGridStyle(QtCore.Qt.NoPen)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 40, 101, 17))
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(10, 70, 101, 25))
        self.lineEdit.setObjectName("lineEdit")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(130, 70, 61, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(20, 210, 201, 17))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(140, 240, 51, 25))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 430, 51, 21))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.pushButton_3.setFont(font)
        self.pushButton_3.setObjectName("pushButton_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Localisable (pt-BR) texts for all widgets and table headers.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Nome"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "CPF"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "Salario"))
        self.label.setText(_translate("MainWindow", "Pesquisar:"))
        self.pushButton.setText(_translate("MainWindow", "Buscar"))
        self.label_2.setText(_translate("MainWindow", "Selecione um item para excluir"))
        self.pushButton_2.setText(_translate("MainWindow", "Excluir"))
        self.pushButton_3.setText(_translate("MainWindow", "Voltar"))
"jvirgiliomartins09@gmail.com"
] | jvirgiliomartins09@gmail.com |
95a72a82cd9e5ae2c7086705148b100130173937 | a5037e3408fe54bee25e26a994c2c6a39aea4de0 | /env/bin/pylint | fa81f2eaec5bbacd5cea443dcf19eddb11930a5e | [] | no_license | alexzhou124/Webchecker | cb0e5eeecd57d4a6dac97fb88bc04be5e37e6761 | a20c22217f53a103b8196fe42f91fdacb9cbdcaa | refs/heads/master | 2022-07-31T13:45:57.860415 | 2020-05-26T04:38:14 | 2020-05-26T04:38:14 | 263,674,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/Users/alexzhou/Desktop/PyScripts/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
    # Strip a setuptools "-script.py"/".exe" suffix from argv[0] so the
    # reported program name is just "pylint", then exit with its status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())
| [
"alexzhou@alexs-mbp.lan"
] | alexzhou@alexs-mbp.lan | |
a883fab54ea913ab199a9734c1295bcec74c8769 | 530505736a7f8017edbc60dae4be6d21f3ace122 | /Paint/Paint.py | 6baa2c8d6389c69a495470caefa78741a138365c | [] | no_license | DanielJulian/OpenCV | d31cadb78b00d39571a7bbdd95dd60c5e9c7cd32 | 11af2c4672b3e72f6c891cdf6178c9c0f600e456 | refs/heads/master | 2021-01-11T20:58:45.928325 | 2017-03-23T00:17:10 | 2017-03-23T00:17:10 | 79,223,847 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,966 | py | import cv2
import numpy as np
from PIL import Image, ImageTk
best_cnt = 1  # last contour considered big enough to track
blank_image = np.zeros((400,600,4), np.uint8)  # persistent RGBA canvas for the strokes
cx=0  # current marker centroid, x
cy=0  # current marker centroid, y
prev_x = cx  # centroid position on the previous frame
prev_y = cy
draw_color = (255,0,0)  # active brush colour
color_radius = 40  # NOTE(review): appears unused in this file -- confirm
def set_image(lbl,img):
    # Convert a numpy image into a Tk-compatible photo and display it in
    # the given Tkinter label.  The reference kept on the label
    # (lbl.imgtk) prevents the PhotoImage from being garbage-collected.
    img = Image.fromarray(img)
    imgtk = ImageTk.PhotoImage(image=img)
    lbl.imgtk = imgtk
    lbl.configure(image=imgtk)
def in_circle(center_x, center_y, radius, x, y):
    # A point lies inside (or on) the circle when its squared distance
    # from the centre does not exceed the squared radius -- no sqrt needed.
    dx = center_x - x
    dy = center_y - y
    return dx * dx + dy * dy <= radius * radius
def in_rectangle(x1, y1, x4, y4, cx, cy):
    """Return True if point (cx, cy) lies strictly inside the
    axis-aligned rectangle with corners (x1, y1) and (x4, y4).

    Bounds are exclusive, matching the original comparisons; a point on
    an edge counts as outside.
    """
    # Chained comparisons replace the if/return True/return False idiom.
    return x1 < cx < x4 and y1 < cy < y4
def start():
    # Main webcam loop (Python 2 syntax).  Tracks a marker whose colour
    # falls in the HSV range below, uses its centroid to pick a brush
    # colour from three on-screen swatches and paints onto the
    # persistent RGBA canvas `blank_image`.  ESC exits.
    cap = cv2.VideoCapture(0)
    global best_cnt,blank_image,prev_x,prev_y,cy,cx,draw_color
    while(True):
        flag, frame = cap.read()
        frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, (600,400))
        orig_frame = frame.copy();
        frame = cv2.blur(frame,(3,3))
        hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
        # binary mask of pixels inside the tracked HSV range
        thresh = cv2.inRange(hsv,np.array((26, 80, 84)), np.array((40, 255, 255)))
        thresh2 = thresh.copy()
        _, contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
        max_area = 0
        # keep the last contour larger than 100 px^2 as the marker
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > 100:
                max_area = area
                best_cnt = cnt
            else:
                max_area = 0
        # centroid from the contour's image moments
        # NOTE(review): divides by M['m00'] without guarding against 0
        M = cv2.moments(best_cnt)
        cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        print "x:",cx,"y:",cy
        # the three swatch rectangles along the top edge select a colour
        if in_rectangle(551,00,600,50,cx,cy):
            draw_color = (255,0,0)
            print "inside3"
        if in_rectangle(501,000,550,50,cx,cy):
            draw_color = (0,255,0)
            print "inside2"
        if in_rectangle(450,000,500,50,cx,cy):
            draw_color = (0,0,255)
            print "inside1"
        # large jumps become a dot; small moves are joined with a line
        if(abs(cx-prev_x)>100 or abs(cy-prev_y)>100):
            cv2.circle(blank_image,(cx,cy),5,draw_color,-1)
        else:
            cv2.line(blank_image,(cx,cy),(prev_x,prev_y),draw_color,3)
        # draw the three half-transparent swatch rectangles onto the frame
        img_cpy = orig_frame.copy()
        cv2.rectangle(img_cpy,(450,000),(500,50),(255,0,0),-1) #B
        cv2.rectangle(img_cpy,(500,000),(550,50),(0,255,0),-1) #g
        cv2.rectangle(img_cpy,(550,000),(600,50),(0,0,255),-1) #r
        opacity = 0.5
        cv2.addWeighted(img_cpy, opacity, orig_frame, 1 - opacity, 0, orig_frame)
        # compose camera frame + painted canvas and show both windows
        orig_frame = cv2.cvtColor(orig_frame, cv2.COLOR_BGR2RGBA)
        final = cv2.add(orig_frame,blank_image)
        final = cv2.cvtColor(final, cv2.COLOR_RGBA2BGR)
        img = Image.fromarray(final)
        cv2.imshow('thresh',thresh2)
        cv2.imshow('final',final)
        #cv2.imshow(final)
        prev_x = cx
        prev_y = cy
        # ESC (key code 27) quits the loop
        k = cv2.waitKey(10)
        if k == 27:
            break
start()
| [
"dani_77@live.com.ar"
] | dani_77@live.com.ar |
9ade69f871a50e43da6e0373463e9d27ef7ea1d8 | a38be66933680563889fde53eb0d3810e2bcee83 | /func/findPath_wCurves.py | b3d663c22daaba6ad00898e793094fe2df8bae78 | [] | no_license | seanjennings960/PathPlanningChallenge | eb7999881af8bb9752f40a406896ca3ea2830d0e | ad072c9a84296e9f6e3c51ed81459aeadf843028 | refs/heads/master | 2021-01-22T04:18:27.630626 | 2017-05-25T23:58:48 | 2017-05-25T23:58:48 | 92,453,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | from func.extractVisGraph import *
from func.Dijkstra import Dijkstra
from func.CurveObst import *
from func.plotting import *
import matplotlib.pyplot as plt
def findPath_wCurves(ObstacleList,CurveObstList,posStart,posGoal):
    '''
    Finds shortest path and path distance of given environment
    Inputs: ObstacleList is list of polygonal obstacles [P1,P2,...]
            with Pi a 2d array of vertices coordinates in CCW order
            posStart and posGoal are 1d arrays with x and y coordinates of start and goal positions
    Output: path is 2d array [[x0,y0],[x1,y1],...] of order coordinates in shortest path
            pathDist is the total distance of the path
    '''
    # Approximate each curved obstacle by a polygon and treat it like
    # the polygonal ones.  NOTE(review): this appends into the caller's
    # ObstacleList (in-place mutation) -- confirm callers expect that.
    for curveObst in CurveObstList:
        P = getPolygon(curveObst, 2)
        ObstacleList.append(P)
    # Build the visibility graph (vertices, edges, weights), hook the
    # start/goal into it, then run Dijkstra for the shortest path.
    (V,E,w) = extractVisGraph(ObstacleList)
    V = addStartAndGoalToGraph(posStart,posGoal,V,E,w,ObstacleList)
    (path,pathDist) = Dijkstra(V,E,w)
    return (path,pathDist)
| [
"noreply@github.com"
] | noreply@github.com |
c192337bb5be3ec94e513aecb2a49f60b4fffbe5 | 9df06c7c286b1a88ba4d9be1f9b9722b10199a90 | /lib/decisions.py | 4a2b74cfbd0d70d2ca16d712de5eef7a44a9ce0a | [
"Apache-2.0"
] | permissive | pcn/resgate | a01b4f805a3ff7fd15072400ec3dd2280f02e213 | 3aa6cda0f31d2b1bc5a74dbac3fa22a5fb3043ed | refs/heads/main | 2023-03-31T04:55:01.226529 | 2021-04-07T01:32:35 | 2021-04-07T01:32:35 | 350,059,056 | 1 | 0 | Apache-2.0 | 2021-03-28T02:01:02 | 2021-03-21T16:45:40 | Gherkin | UTF-8 | Python | false | false | 65 | py | # Read datalog statements, and edvaluate them to make decisions
| [
"spacey-github.com@ssr.com"
] | spacey-github.com@ssr.com |
3496db296e088ab5b474d57d635d971b8e919291 | 923a14dd594191d77e30465027ece8371f28a7a6 | /web-serpng/code/serpng/jobs/services/search/user_data_tests.py | a41f50ac118c451b073c3ebd84206912b868bae7 | [] | no_license | alyago/django-web | 3af7b3389df59104eaf5e50ed9cc2c3e730fed7f | da3073eec6d676dfe0164502b80d2a1c75e89575 | refs/heads/master | 2021-01-10T19:33:45.425520 | 2013-11-21T09:43:37 | 2013-11-21T09:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | """User Data Tests."""
from django.test import TestCase
import user_data
# JSON responses from the bridge to be used in the tests.
# No 'user_data' key at all.
JSON_RESPONSE_WITH_NO_USER_DATA = {
    'abc': 'I am not user data'
}
# Fully populated user data: recent searches, email and two saved jobs.
JSON_RESPONSE_WITH_GOOD_USER_DATA = {
    'user_data': {
        'recent_searches': ['rs1', 'rs2'],
        'user_email': 'meow@cat.com',
        'saved_jobs': {
            'job1': {'comment': 'abc'},
            'job2': {'comment': 'def'}
        }
    }
}
# 'user_data' present but empty.
JSON_RESPONSE_WITH_BAD_USER_DATA = {
    'user_data': {}
}
# saved_jobs delivered as an (empty) list instead of a dict.
JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS = {
    'user_data': {
        'saved_jobs': []
    }
}
# One saved job carries a null comment.
JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB = {
    'user_data': {
        'saved_jobs': {
            'job1': {'comment': 'abc'},
            'job2': {'comment': None}
        }
    }
}
# Tests
class UserDataTestCase(TestCase):
    """Unit tests for user_data.UserData construction from bridge JSON:
    defaults when data is missing/malformed, and normalisation of
    saved-job comments."""
    # pylint: disable=R0904
    # (R0904 = too-many-public-methods; expected for a TestCase.)
    def test_no_user_data_in_json_response(self):
        """Default values should be correct when there is no user data."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_NO_USER_DATA)
        self.assertIsNone(test_user_data.recent_searches)
        self.assertIsNone(test_user_data.user_email)
        self.assertEqual(test_user_data.saved_jobs, {})
    def test_good_recent_searches(self):
        """Attribute 'recent_searches' should be correctly populated."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(test_user_data.recent_searches[1], 'rs2')
    def test_good_user_email(self):
        """Attribute 'user_email' should be correctly populated."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(test_user_data.user_email, 'meow@cat.com')
    def test_good_saved_jobs(self):
        """Attribute 'saved_jobs' should be correctly populated."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(test_user_data.saved_jobs['job1'], 'abc')
    def test_no_recent_searches(self):
        """Attribute 'recent_searches' should have good default value when user_data is empty."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertIsNone(test_user_data.recent_searches)
    def test_no_user_email(self):
        """Attribute 'user_email' should have good default value when user_data is empty."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertIsNone(test_user_data.user_email)
    def test_no_saved_jobs(self):
        """Attribute 'saved_jobs' should have good default value when user_data is empty."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertEqual(test_user_data.saved_jobs, {})
    def test_empty_array_saved_jobs(self):
        """Attribute 'saved_jobs' should have good default value when saved_jobs is empty."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS)
        self.assertEqual(test_user_data.saved_jobs, {})
    def test_null_comment_saved_job(self):
        """Attribute 'saved_jobs' should convert null comments to empty strings."""
        test_user_data = user_data.UserData(JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB)
        self.assertEqual(test_user_data.saved_jobs['job2'], '')
| [
"oleg@simplyhired.com"
] | oleg@simplyhired.com |
57c3e958ff5e5090f63c4b678e890e95821acb06 | 7818a372b1cc7ef3f12decd8304ea8a1afe9f966 | /trade_portal/trade_portal/users/views/users.py | 2a5936a14c762f0609f5b28d4d61bce20f1a2eb4 | [] | no_license | koriaf/trade_portal | 7682bd6f7a1bdd0c6471bc4162f1774508de9262 | d8632fda4bab2fb5dedd42dda556ce14ecb731fa | refs/heads/master | 2023-06-06T22:17:44.580684 | 2020-08-10T07:07:10 | 2020-08-10T07:07:24 | 286,424,417 | 0 | 0 | null | 2020-08-10T08:53:18 | 2020-08-10T08:53:17 | null | UTF-8 | Python | false | false | 3,799 | py | import logging
from django.core.exceptions import ValidationError
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (
DetailView, RedirectView, UpdateView, View
)
from django.shortcuts import redirect
from django.urls import reverse
from trade_portal.users.models import Organisation, OrgRoleRequest
from trade_portal.users.forms import UserChangeForm
logger = logging.getLogger(__name__)
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
    """Profile page of the currently logged-in user.

    GET renders the profile; POST accepts an evidence file upload for one of
    the user's own pending organisation role requests.
    """

    def get_object(self):
        # Always show the requesting user's own profile — no pk from the URL.
        return self.request.user

    def post(self, request, *args, **kwargs):
        # Local import — presumably to avoid a circular import between the
        # views and tasks modules at load time; TODO confirm.
        from trade_portal.users.tasks import notify_staff_about_evidence_uploaded

        if "evidence" in request.FILES:
            # Run the model field's own validators against the upload first;
            # on the first failure warn the user and abort without touching
            # the role request.
            for validator in OrgRoleRequest._meta.get_field("evidence").validators:
                try:
                    validator(request.FILES["evidence"])
                except ValidationError as e:
                    messages.warning(request, e.messages[0])
                    return redirect(request.path_info)
            # Restrict to the user's OWN requests that are still in a state
            # accepting evidence (awaiting evidence or awaiting review);
            # raises DoesNotExist (-> 500) otherwise.
            req = OrgRoleRequest.objects.get(
                pk=request.POST.get("request_id"),
                created_by=request.user,
                status__in=[
                    OrgRoleRequest.STATUS_EVIDENCE,
                    OrgRoleRequest.STATUS_REQUESTED
                ]
            )
            req.evidence = request.FILES["evidence"]
            if req.status == OrgRoleRequest.STATUS_EVIDENCE:
                # Evidence was the missing piece: push the request back into
                # review and notify staff; the small countdown lets the save
                # below land before the task runs.
                req.status = OrgRoleRequest.STATUS_REQUESTED
                notify_staff_about_evidence_uploaded.apply_async(
                    [req.id],
                    countdown=1
                )
            req.save()
            messages.success(
                request,
                "The file has been uploaded as an evidence and the request has been sent to review"
            )
        # Redirect back to the profile page in every case (POST/redirect/GET).
        return redirect(request.path_info)
# Module-level callable used by the URLconf.
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile via UserChangeForm."""

    form_class = UserChangeForm

    def get_object(self):
        # Re-fetch the user by primary key so the form binds to a fresh row.
        return User.objects.get(pk=self.request.user.pk)

    def get_success_url(self):
        return reverse("users:detail")

    def post(self, request, *args, **kwargs):
        # LoginRequiredMixin already guards this view; keep the sanity check.
        assert request.user.is_authenticated
        return super().post(request, *args, **kwargs)

    def form_valid(self, form):
        messages.info(self.request, "Your profile has been updated successfully")
        return super().form_valid(form)
# Module-level callable used by the URLconf.
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Temporary redirect sending an authenticated user to their profile."""

    permanent = False

    def get_redirect_url(self):
        target = reverse("users:detail")
        return target
# Module-level callable used by the URLconf.
user_redirect_view = UserRedirectView.as_view()
class ChangeOrgView(LoginRequiredMixin, View):
    """Switch the session's current organisation for the logged-in user."""

    def post(self, request, *args, **kwargs):
        org_id = request.POST.get("current_org")
        redirect_to = request.GET.get("next") or request.POST.get("next") or "/"
        # Only allow local (same-site) redirect targets.
        assert redirect_to.startswith("/")

        if request.user.is_staff:
            # Staff may switch to any organisation without a membership check.
            org = Organisation.objects.get(pk=org_id)
        else:
            membership = (
                request.user.orgmembership_set.all().filter(org_id=org_id).first()
            )
            if not membership:
                messages.error(request, "You don't have access to that org anymore")
                return redirect(redirect_to)
            org = membership.org

        request.session["current_org_id"] = int(org.pk)
        messages.success(request, f"The {org} has been selected as the current organisation")
        return redirect(redirect_to)
| [
"koriaf@gmail.com"
] | koriaf@gmail.com |
25894a978235e5a7ba954ec8cdc0e0047e8254e1 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/dropbox/metadata/vorbis.py | e7e48da8552e55eb862035894baafb7a71cedce1 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | #Embedded file name: dropbox/metadata/vorbis.py
from collections import defaultdict
import struct
from .utils import safe_read
def readVorbisComment(file_obj):
    """Parse a Vorbis comment block from ``file_obj``.

    Layout (all lengths little-endian uint32): vendor string (skipped), a
    comment count, then ``count`` entries of the form ``FIELD=value``.

    Returns a dict mapping lowercased field names to lists of values
    (fields may repeat), or ``{}`` on any parse error.
    """
    toret = defaultdict(list)
    try:
        # Vendor string: 4-byte length followed by the payload; we skip it.
        vendor_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
        safe_read(file_obj, vendor_length)
        user_comment_list_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
        for _ in range(user_comment_list_length):
            length = struct.unpack('<I', safe_read(file_obj, 4))[0]
            comment = ''.join(struct.unpack('<%dc' % length, safe_read(file_obj, length)))
            # Split on the FIRST '=' only: the Vorbis spec allows '=' inside
            # the value. The previous unbounded split made the 2-tuple unpack
            # raise on such comments, and the blanket except below then threw
            # away the ENTIRE comment block.
            k, v = comment.split('=', 1)
            toret[k.lower()].append(v)
        return toret
    except Exception:
        # Best-effort parser: any malformed input yields no metadata.
        return {}
def decodeBlockPicture(file_obj):
    """Decode a FLAC METADATA_BLOCK_PICTURE structure into a dict.

    All integers are big-endian uint32. Returns {} on any parse error.
    """
    try:
        picture_type, mime_len = struct.unpack('>II', safe_read(file_obj, 8))
        mime_type = ''.join(struct.unpack('>%dc' % mime_len, safe_read(file_obj, mime_len)))
        desc_len = struct.unpack('>I', safe_read(file_obj, 4))[0]
        raw_desc = ''.join(struct.unpack('>%dc' % desc_len, safe_read(file_obj, desc_len)))
        # The description is UTF-8 per the FLAC spec.
        desc = unicode(raw_desc, 'utf-8')
        width, height, depth, colors, data_len = struct.unpack('>IIIII', safe_read(file_obj, 20))
        picture = {
            'type': picture_type,
            'mime': mime_type,
            'description': desc,
            'width': width,
            'height': height,
            'depth': depth,
            'colors': colors,
            'data': safe_read(file_obj, data_len),
        }
        return picture
    except Exception:
        return {}
def readBlockPicture(file_obj):
    """Read a raw METADATA_BLOCK_PICTURE structure without decoding it.

    Walks the structure only to learn the variable-length field sizes, and
    returns the untouched bytes under the 'metadata_block_picture' key
    (wrapped in a list, matching the Vorbis comment value convention).
    Returns {} on any parse error.
    """
    try:
        raw = safe_read(file_obj, 8)
        pic_type, mime_length = struct.unpack('>II', raw[-8:])
        raw += safe_read(file_obj, mime_length)
        raw += safe_read(file_obj, 4)
        desc_length = struct.unpack('>I', raw[-4:])[0]
        raw += safe_read(file_obj, desc_length)
        raw += safe_read(file_obj, 20)
        width, height, depth, colors, data_len = struct.unpack('>IIIII', raw[-20:])
        raw += safe_read(file_obj, data_len)
        return {'metadata_block_picture': [raw]}
    except Exception:
        return {}
| [
"bizonix@me.com"
] | bizonix@me.com |
c7e7dfd52fef31ae09d405cc27fcade4afed608c | 99814fefd3ebe1ac34006c2e08fee7d1c504bf4c | /Database/Database/__init__.py | 9b5f901068c8ef94c618ce1301f284c63d2241e4 | [] | no_license | traustitj/TornadoMongoTest | c0d40f4d0b15f2804ed42559fe7ca69e950408ff | 8002bed6486016055eb22aaa0a33a0a6ac346ea1 | refs/heads/master | 2022-08-14T07:21:01.334987 | 2022-07-26T10:21:03 | 2022-07-26T10:21:03 | 131,761,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | import os.path
from database import ConnectDB
| [
"traustitj@mac.com"
] | traustitj@mac.com |
47063c6f9762cc541be468fe4120e733110426e3 | d7a68c636e6128533b17975655bd6b46ed222916 | /adapter-transformers-adapters3.1.0/src/transformers/models/decision_transformer/modeling_decision_transformer.py | 959b9763d0bd48505d91336461376868845b5345 | [
"Apache-2.0"
] | permissive | cambridgeltl/autopeft | 69179f8faf2cc4d2164ff78e544dc3fe2d39c331 | d8ad6bea93aa413a54d0e09fe25bdd62b46cfcf5 | refs/heads/main | 2023-05-23T09:21:59.912941 | 2023-04-25T14:35:31 | 2023-04-25T14:35:31 | 594,316,585 | 26 | 4 | Apache-2.0 | 2023-04-25T14:35:32 | 2023-01-28T06:39:25 | Python | UTF-8 | Python | false | false | 43,844 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DecisionTransformer model."""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
if version.parse(torch.__version__) >= version.parse("1.6"):
is_amp_available = True
from torch.cuda.amp import autocast
else:
is_amp_available = False
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from .configuration_decision_transformer import DecisionTransformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
_CONFIG_FOR_DOC = "DecisionTransformerConfig"
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"edbeeching/decision-transformer-gym-hopper-medium",
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
]
# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        pointer = model
        # Walk the TF variable path component by component, translating each
        # TF scope name into the matching PyTorch submodule/parameter.
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                # e.g. "h12" -> ["h", "12"]: module list name + index
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Attention(nn.Module):
    """Multi-head (self- or cross-) attention layer mirrored from GPT-2.

    NOTE(review): this class is kept in sync with upstream ``GPT2Attention``
    via the ``# Copied from`` marker above — do not modify it independently
    of the upstream implementation.
    """

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()

        max_positions = config.max_position_embeddings
        # Static lower-triangular causal mask sized to the maximum sequence
        # length; sliced per-call in _attn / _upcast_and_reordered_attn.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            # Cross-attention: K/V come from the encoder states (c_attn),
            # Q from the decoder states (q_attn).
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            # Self-attention: a single fused Q/K/V projection.
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.pruned_heads = set()

    def prune_heads(self, heads):
        # Remove the given attention heads from c_attn/c_proj in place.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Standard scaled-dot-product attention path.
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.tensor(
                value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
            )

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool)
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Numerically safer variant: attention scores are computed in fp32
        # with autocast disabled, with the scale folded into baddbmm's alpha.
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        if is_amp_available:
            with autocast(enabled=False):
                q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
                attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
                attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
        else:
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            # Cross-attention: queries from hidden_states, keys/values from
            # the encoder; the encoder mask replaces the decoder mask.
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to"
                    " instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if layer_past is not None:
            # Prepend cached keys/values from earlier decoding steps.
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2MLP(nn.Module):
    """GPT-2 feed-forward block: project up to ``intermediate_size``, apply the
    configured activation, project back to ``hidden_size``, then dropout.

    NOTE(review): kept in sync with upstream ``GPT2MLP`` via the
    ``# Copied from`` marker — do not modify independently of upstream.
    """

    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        # Activation is looked up by name from the config (e.g. "gelu_new").
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Block(nn.Module):
    """One pre-LayerNorm GPT-2 transformer block: LN -> self-attention ->
    residual, optional LN -> cross-attention -> residual, then LN -> MLP ->
    residual.

    NOTE(review): kept in sync with upstream ``GPT2Block`` via the
    ``# Copied from`` marker — do not modify independently of upstream.
    """

    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        # MLP inner width defaults to 4x the hidden size when not configured.
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        if config.add_cross_attention:
            self.crossattention = DecisionTransformerGPT2Attention(
                config, is_cross_attention=True, layer_idx=layer_idx
            )
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual

        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(
                hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights

        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            # Drop the (None) cache slot when caching is disabled.
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions, cross_attentions)
class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DecisionTransformerConfig
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

        # GPT-2 paper scheme: re-init residual-path projections with std scaled
        # by 1/sqrt(2 * n_layer) — two residual additions per block.
        # (Reference, Megatron-LM: https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py)
        residual_std = self.config.initializer_range / math.sqrt(2 * self.config.n_layer)
        for name, p in module.named_parameters():
            if "c_proj" in name and "weight" in name:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                p.data.normal_(mean=0.0, std=residual_std)

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled on the backbone module only.
        if isinstance(module, DecisionTransformerGPT2Model):
            module.gradient_checkpointing = value
class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
    """GPT-2 backbone used by the Decision Transformer: token + position
    embeddings, a stack of ``DecisionTransformerGPT2Block`` layers, and a
    final LayerNorm.

    NOTE(review): ``forward`` carries a ``# Copied from`` marker and must stay
    in sync with upstream ``GPT2Model.forward``.
    """

    _keys_to_ignore_on_load_missing = ["attn.masked_bias"]

    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        # Token and learned absolute position embeddings.
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList(
            [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
        )
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        # Fall back to the config defaults for any unset output flag.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            # Positions continue after the cached prefix during generation.
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # GPT2Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):

            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    # Checkpointing recomputes activations, so caching is disabled.
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                # Attention tuple index shifts by one when the cache slot is present.
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
@dataclass
class DecisionTransformerOutput(ModelOutput):
    """
    Output type of the Decision Transformer model: the three per-timestep
    prediction heads plus the underlying transformer's standard outputs.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
            Environment state predictions
        action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
            Model action predictions
        return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
            Predicted returns for each state
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # All fields default to None so partial outputs can be constructed;
    # declaration order fixes the positional-argument order of the dataclass.
    # NOTE(review): hidden_states/attentions are tuples at runtime despite the
    # FloatTensor annotations (annotation kept as-is to match upstream).
    state_preds: torch.FloatTensor = None
    action_preds: torch.FloatTensor = None
    return_preds: torch.FloatTensor = None
    hidden_states: torch.FloatTensor = None
    attentions: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
class DecisionTransformerPreTrainedModel(PreTrainedModel):
    """
    Abstract base class for Decision Transformer checkpoints: hooks the model
    family into the generic `PreTrainedModel` machinery (config handling and
    pretrained-weight download/loading) and supplies weight initialization.
    """
    config_class = DecisionTransformerConfig
    base_model_prefix = "decision_transformer"
    main_input_name = "states"
    supports_gradient_checkpointing = False
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize a single submodule in place (invoked via `post_init`)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Plain normal init; the TF reference uses truncated_normal instead
            # (cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
            return
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding vector at exactly zero.
                module.weight.data[module.padding_idx].zero_()
            return
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform: zero shift, unit scale.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
DECISION_TRANSFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
Args:
states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
The states for each step in the trajectory
actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
The actions taken by the "expert" policy for the current state, these are masked for auto regressive
prediction
rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The rewards for each state, action
returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The returns for each state in the trajectory
timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
The timestep for each step in the trajectory
attention_mask (`torch.LongTensor` of shape `(batch_size, episode_length)`):
Masking, used to mask the actions when performing autoregressive prediction
"""
@add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
    """
    The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
    setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        # note: the only difference between this GPT2Model and the default Huggingface version
        # is that the positional embeddings are removed (since we'll add those ourselves)
        self.encoder = DecisionTransformerGPT2Model(config)

        # One embedding head per input modality; all project into the shared hidden size.
        self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
        self.embed_return = torch.nn.Linear(1, config.hidden_size)
        self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
        self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)

        self.embed_ln = nn.LayerNorm(config.hidden_size)

        # note: we don't predict states or returns for the paper
        self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
        # Optionally squash predicted actions into [-1, 1] with a trailing Tanh.
        self.predict_action = nn.Sequential(
            *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
        )
        self.predict_return = torch.nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        states=None,
        actions=None,
        rewards=None,
        returns_to_go=None,
        timesteps=None,
        attention_mask=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ) -> Union[Tuple, DecisionTransformerOutput]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import DecisionTransformerModel
        >>> import torch
        >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
        >>> # evaluation
        >>> model = model.to(device)
        >>> model.eval()
        >>> env = gym.make("Hopper-v3")
        >>> state_dim = env.observation_space.shape[0]
        >>> act_dim = env.action_space.shape[0]
        >>> state = env.reset()
        >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
        >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
        >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
        >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
        >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
        >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
        >>> # forward pass
        >>> with torch.no_grad():
        ...     state_preds, action_preds, return_preds = model(
        ...         states=states,
        ...         actions=actions,
        ...         rewards=rewards,
        ...         returns_to_go=target_return,
        ...         timesteps=timesteps,
        ...         attention_mask=attention_mask,
        ...         return_dict=False,
        ...     )
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_length = states.shape[0], states.shape[1]

        if attention_mask is None:
            # attention mask for GPT: 1 if can be attended to, 0 if not.
            # Bug fix: build the mask on the inputs' device — previously it was
            # always allocated on the CPU, crashing when the model ran on GPU.
            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long, device=states.device)

        # embed each modality with a different head
        state_embeddings = self.embed_state(states)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns_to_go)
        time_embeddings = self.embed_timestep(timesteps)

        # time embeddings are treated similar to positional embeddings
        state_embeddings = state_embeddings + time_embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings

        # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
        # which works nice in an autoregressive sense since states predict actions
        stacked_inputs = (
            torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
            .permute(0, 2, 1, 3)
            .reshape(batch_size, 3 * seq_length, self.hidden_size)
        )
        stacked_inputs = self.embed_ln(stacked_inputs)

        # to make the attention mask fit the stacked inputs, have to stack it as well
        stacked_attention_mask = (
            torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
            .permute(0, 2, 1)
            .reshape(batch_size, 3 * seq_length)
        )
        device = stacked_inputs.device
        # we feed in the input embeddings (not word indices as in NLP) to the model;
        # position ids are all zero because time is already encoded by embed_timestep
        encoder_outputs = self.encoder(
            inputs_embeds=stacked_inputs,
            attention_mask=stacked_attention_mask,
            position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        x = encoder_outputs[0]

        # reshape x so that the second dimension corresponds to the original
        # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
        x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)

        # get predictions
        return_preds = self.predict_return(x[:, 2])  # predict next return given state and action
        state_preds = self.predict_state(x[:, 2])  # predict next state given state and action
        action_preds = self.predict_action(x[:, 1])  # predict next action given state
        if not return_dict:
            return (state_preds, action_preds, return_preds)

        return DecisionTransformerOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,
            state_preds=state_preds,
            action_preds=action_preds,
            return_preds=return_preds,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
| [
"hz416@cam.ac.uk"
] | hz416@cam.ac.uk |
dcfed5c9629b784d51e081eff28206ae47360593 | 4aabe0322e6a922c66ab1b116f6f106cecef338d | /script/audio/deepspeech.pytorch/test_thchs30.py | bf611011bc837b824ec0d888f3aa741a27e769b4 | [] | no_license | luo-pan/make_dataset | a04c0839211824ad8e7c8e993e7e3e54cc2cf5b7 | b54599cafa13c6483129c0c39529466a0e878e8d | refs/heads/master | 2021-01-25T13:18:34.285162 | 2017-09-06T13:12:24 | 2017-09-06T13:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,752 | py | import argparse
import json
import torch
from torch.autograd import Variable
import os
from data.data_loader import SpectrogramDataset, AudioDataLoader
# from decoder import GreedyDecoder, BeamCTCDecoder, Scorer, KenLMScorer
from decoder import GreedyDecoder, BeamCTCDecoder
from model import DeepSpeech
# Pin all CUDA work in this process to physical GPU 2; must be set before
# torch initializes CUDA for it to take effect.
os.environ["CUDA_VISIBLE_DEVICES"]="2"
# Command-line interface: model/decoder selection plus beam-search tuning knobs.
parser = argparse.ArgumentParser(description='DeepSpeech prediction')
parser.add_argument('--model_path', default='models/deepspeech_final.pth.tar',
                    help='Path to model file created by training')
parser.add_argument('--cuda', action="store_true", help='Use cuda to test model')
parser.add_argument('--test_manifest', metavar='DIR',
                    help='path to validation manifest csv', default='data/test_manifest.csv')
parser.add_argument('--batch_size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num_workers', default=4, type=int, help='Number of workers used in dataloading')
parser.add_argument('--decoder', default="greedy", choices=["greedy", "beam"], type=str, help="Decoder to use")
# The options below only take effect with --decoder beam.
beam_args = parser.add_argument_group("Beam Decode Options", "Configurations options for the CTC Beam Search decoder")
beam_args.add_argument('--beam_width', default=10, type=int, help='Beam width to use')
beam_args.add_argument('--lm_path', default=None, type=str, help='Path to an (optional) kenlm language model for use with beam search (req\'d with trie)')
beam_args.add_argument('--trie_path', default=None, type=str, help='Path to an (optional) trie dictionary for use with beam search (req\'d with LM)')
beam_args.add_argument('--lm_alpha', default=0.8, type=float, help='Language model weight')
beam_args.add_argument('--lm_beta1', default=1, type=float, help='Language model word bonus (all words)')
beam_args.add_argument('--lm_beta2', default=1, type=float, help='Language model word bonus (IV words)')
args = parser.parse_args()
if __name__ == '__main__':
    # Load the trained acoustic model and switch it to inference mode.
    model = DeepSpeech.load_model(args.model_path, cuda=args.cuda)
    model.eval()
    labels = DeepSpeech.get_labels(model)
    audio_conf = DeepSpeech.get_audio_conf(model)
    if args.decoder == "beam":
        # NOTE(review): Scorer and KenLMScorer are NOT imported (their import is
        # commented out at the top of this file), so this whole branch raises
        # NameError at runtime — restore the import before using --decoder beam.
        scorer = None
        if args.lm_path is not None:
            scorer = KenLMScorer(labels, args.lm_path, args.trie_path)
            scorer.set_lm_weight(args.lm_alpha)
            scorer.set_word_weight(args.lm_beta1)
            scorer.set_valid_word_weight(args.lm_beta2)
        else:
            scorer = Scorer()
        # NOTE(review): the beam decoder uses ' ' as the space symbol while the
        # greedy decoder below uses '<space>' — confirm which token the label
        # set actually contains.
        decoder = BeamCTCDecoder(labels, scorer, beam_width=args.beam_width, top_paths=1, space_index=labels.index(' '), blank_index=labels.index('_'))
    else:
        decoder = GreedyDecoder(labels, space_index=labels.index('<space>'), blank_index=labels.index('_'))
    test_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.test_manifest, labels=labels,
                                      normalize=True)
    test_loader = AudioDataLoader(test_dataset, batch_size=args.batch_size,
                                  num_workers=args.num_workers)
    total_cer, total_wer = 0, 0
    for i, (data) in enumerate(test_loader):
        inputs, targets, input_percentages, target_sizes = data
        # volatile=True is the legacy (pre-0.4 PyTorch) idiom for no-grad inference.
        inputs = Variable(inputs, volatile=True)
        # unflatten targets: `targets` is one flat 1-D tensor; slice it back
        # into one label sequence per utterance using target_sizes.
        split_targets = []
        offset = 0
        for size in target_sizes:
            split_targets.append(targets[offset:offset + size])
            offset += size
        if args.cuda:
            inputs = inputs.cuda()
        out = model(inputs)
        out = out.transpose(0, 1)  # TxNxH
        seq_length = out.size(0)
        # Recover each utterance's true (unpadded) output length from the
        # fraction of the padded length it occupies.
        sizes = input_percentages.mul_(int(seq_length)).int()
        decoded_output = decoder.decode(out.data, sizes)
        target_strings = decoder.process_strings(decoder.convert_to_strings(split_targets))
        # print("out.data",out.data[0])
        # print("decoded_output[0]",decoded_output[0])
        # print("split_targets",split_targets[0])
        # print("target_strings[0]",target_strings[0])
        # print("decoder.convert_to_strings(split_targets)",decoder.convert_to_strings(split_targets)[0])
        wer, cer = 0, 0
        for x in range(len(target_strings)):
            # WER is normalized by the count of '<space>'-delimited words and CER
            # by the count of ' '-delimited tokens — presumably the THCHS-30
            # labels are space-separated characters with literal '<space>'
            # markers between words; TODO confirm against the label format.
            wer += decoder.wer(decoded_output[x], target_strings[x]) / float(len(target_strings[x].replace(' ','').split('<space>')))
            cer += decoder.cer(decoded_output[x], target_strings[x]) / float(len(target_strings[x].split(' ')))
        total_cer += cer
        total_wer += wer
    # Average the accumulated per-utterance rates over the whole test set.
    wer = total_wer / len(test_loader.dataset)
    cer = total_cer / len(test_loader.dataset)
    print('Test Summary \t'
          'Average WER {wer:.3f}\t'
          'Average CER {cer:.3f}\t'.format(wer=wer * 100, cer=cer * 100))
| [
"zhy8623080@163.com"
] | zhy8623080@163.com |
7dde7d7f90a82f371578edbbdd15c3b2f54ec643 | da22272ac9c9de6d3b380b3e66547055e7b01d20 | /hangouts.py | f7d0ab8e2d25056a11fd62d3e719a9b3a17f3a48 | [
"MIT"
] | permissive | kyyslauk/gtalk_export | 0f67a09c096388f91be90ca0baf6aca7bd0f4f7a | d56c408fc6df8045adf690ba9c97350ae20e096a | refs/heads/master | 2021-06-19T13:07:44.769010 | 2017-07-07T20:53:31 | 2017-07-07T20:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,772 | py | import re
import json
import time
# This code was inspired by Jay2K1's Hangouts parser. You can see the
# blogpost for the original at:
# http://blog.jay2k1.com/2014/11/10/how-to-export-and-backup-your-google-hangouts-chat-history/
# He also runs a webservice for parsing Google Hangouts JSON files at:
# http://hangoutparser.jay2k1.com/
def replaceSmileys(string):
    """Replace UTF-8 graphical emoji in *string* with ASCII emoticon equivalents.

    Emoji list originally taken from https://aprescott.com/posts/hangouts-emoji.
    Returns a new string; the input is not modified.
    """
    # Each emoji is a single literal character, so plain str.replace suffices —
    # the previous implementation ran every substitution through re.sub, which
    # invoked the regex engine needlessly. Keeping pattern and replacement in
    # one table also prevents the two halves drifting out of sync, which the
    # old parallel-lists layout made easy.
    emoji_to_ascii = (
        (u'\U0001F41D', '-<@%'),     # honeybee
        (u'\U0001F435', ':(|)'),     # monkey face
        (u'\U0001F437', ':(:)'),     # pig face
        (u'\U0001F473', '(]:{'),     # man with turban
        (u'\U0001F494', '</3'),      # broken heart
        (u'\U0001F49C', '<3'),       # purple heart
        (u'\U0001F4A9', '~@~'),      # pile of poo
        (u'\U0001F600', ':D'),       # grinning face
        (u'\U0001F601', '^_^'),      # grinning face with smiling eyes
        (u'\U0001F602', 'XD'),       # face with tears of joy
        (u'\U0001F603', ':)'),       # smiling face with open mouth
        (u'\U0001F604', '=D'),       # smiling face with open mouth and smiling eyes
        (u'\U0001F605', '^_^;;'),    # smiling face with open mouth and cold sweat
        (u'\U0001F607', 'O:)'),      # smiling face with halo
        (u'\U0001F608', '}:)'),      # smiling face with horns
        (u'\U0001F609', ';)'),       # winking face
        (u'\U0001F60E', 'B-)'),      # smiling face with sunglasses
        (u'\U0001F610', ':|'),       # neutral face
        (u'\U0001F611', '-_-'),      # expressionless face
        (u'\U0001F613', 'o_o;'),     # face with cold sweat
        (u'\U0001F614', 'u_u'),      # pensive face
        (u'\U0001F615', ':/'),       # confused face
        (u'\U0001F616', ':S'),       # confounded face
        (u'\U0001F617', ':*'),       # kissing face
        (u'\U0001F618', ';*'),       # face throwing a kiss
        (u'\U0001F61B', ':P'),       # face with stuck-out tongue
        (u'\U0001F61C', ';P'),       # face with stuck-out tongue and winking eye
        (u'\U0001F61E', ':('),       # disappointed face
        (u'\U0001F621', '>.<'),      # pouting face
        (u'\U0001F622', ":'("),      # crying face
        (u'\U0001F623', '>_<'),      # persevering face
        (u'\U0001F626', 'D:'),       # frowning face with open mouth
        (u'\U0001F62E', ':o'),       # face with open mouth
        (u'\U0001F632', ':O'),       # astonished face
        (u'\U0001F634', '-_-Zzz'),   # sleeping face
        (u'\U0001F635', 'x_x'),      # dizzy face
        (u'\U0001F638', ':3'),       # grinning cat face with smiling eyes
        (u'\U0001F64C', '_'),        # person raising both hands
    )
    for emoji, ascii_equiv in emoji_to_ascii:
        string = string.replace(emoji, ascii_equiv)
    return string
def hangoutsToArray(json_input, timestamp_format):
    """Parse a Google Hangouts Takeout JSON export into plain Python dicts.

    Args:
        json_input: raw JSON text of the export; expects a top-level
            'conversation_state' list (the legacy Takeout schema this tool
            was written against — TODO confirm for newer exports).
        timestamp_format: strftime() format for each message's human-readable
            'datetime' field, e.g. '%Y-%m-%d %H:%M:%S'.

    Returns:
        A list with one dict per conversation containing 'type', 'name',
        'msgcount', 'members' (chat_id -> display name) and 'messages',
        sorted by timestamp. Each message carries 'timestamp', 'datetime',
        'sender_id', 'sender', 'event_type', 'message' and, when an HTML
        rendering differs from the plain text, 'message_html'.
    """
    decoded = json.loads(json_input)
    # extract useful part: the list of conversations
    rawconvos = decoded['conversation_state']
    retval = []

    # loop through conversations
    for i in range(len(rawconvos)):
        retval.append({})
        in_conv = rawconvos[i]['conversation_state']['conversation']
        in_event = rawconvos[i]['conversation_state']['event']
        pdata = in_conv['participant_data']

        # conversation metadata
        retval[i]['type'] = in_conv['type']
        retval[i]['msgcount'] = len(in_event)
        retval[i]['name'] = in_conv['name'] if 'name' in in_conv else ""

        # conversation participants: chat_id -> display name, falling back to
        # "unknown_<chat_id>" when the export carries no fallback_name
        retval[i]['members'] = {}
        for j in range(len(pdata)):
            chat_id = pdata[j]['id']['chat_id']
            display = pdata[j]['fallback_name'] if 'fallback_name' in pdata[j] else "unknown_%s" % chat_id
            retval[i]['members'][chat_id] = display

        # loop through messages/events
        messages = []
        for k in range(len(in_event)):
            messages.append({})
            messages[k]['timestamp'] = in_event[k]['timestamp']
            # timestamps are microseconds since the epoch; the first 10 digits
            # are whole seconds, rendered here in local time
            messages[k]['datetime'] = time.strftime(timestamp_format, time.localtime(int(messages[k]['timestamp'][0:10])))
            sender_id = in_event[k]['sender_id']['chat_id']
            messages[k]['sender_id'] = sender_id
            # bug fix: the fallback previously interpolated the stale `id`
            # variable left over from the participant loop above, labelling
            # every unknown sender with the *last participant's* id; use the
            # actual sender's chat id instead
            messages[k]['sender'] = retval[i]['members'].get(sender_id, "unknown_%s" % sender_id)
            messages[k]['event_type'] = in_event[k]['event_type']

            if messages[k]['event_type'] == 'RENAME_CONVERSATION':
                newname = in_event[k]['conversation_rename']['new_name']
                oldname = in_event[k]['conversation_rename']['old_name']
                messages[k]['message'] = "changed conversation name %s%s" % \
                    (("from '%s'" % oldname) if oldname else "",
                     ("to '%s'" % newname) if newname else "")
            elif messages[k]['event_type'] == 'HANGOUT_EVENT':
                hangout_type = in_event[k]['hangout_event']['event_type']
                if hangout_type == 'START_HANGOUT':
                    messages[k]['message'] = 'started a video chat'
                elif hangout_type == 'END_HANGOUT':
                    messages[k]['message'] = 'ended a video chat'
                else:
                    # unknown hangout sub-event: keep the raw type string
                    messages[k]['message'] = hangout_type
            elif messages[k]['event_type'] == 'REGULAR_CHAT_MESSAGE':
                messages[k]['message'] = ""
                msg = ""
                msghtml = ""
                content = in_event[k]['chat_message']['message_content']
                if 'segment' in content:
                    # join message segments together, tracking a parallel HTML
                    # rendering (links as anchors, newlines as <br>)
                    for segment in content['segment']:
                        if 'text' not in segment:
                            continue
                        if segment['type'] == 'TEXT':
                            msg += segment['text']
                            msghtml += re.sub("\n", "<br>", segment['text'])
                        elif segment['type'] == 'LINK':
                            msg += segment['text']
                            msghtml += '<a href="%s" target="_blank">%s</a>' % (segment['link_data']['link_target'], segment['text'])
                        elif segment['type'] == 'LINE_BREAK':
                            msg += segment['text']
                            msghtml += re.sub("\n", "<br>", segment['text'])
                elif 'attachment' in content:
                    # handle attachments (only PLUS_PHOTO images are understood)
                    for att in content['attachment']:
                        if att['embed_item']['type'][0] == 'PLUS_PHOTO':
                            imgurl = att['embed_item']['embeds.PlusPhoto.plus_photo']['url']
                            msg += imgurl
                            msghtml += '<a href="%s" target="_blank"><img src="%s" alt="attached image" style="max-width:%s"></a>' % (imgurl, imgurl, "100%")
                # replace unicode emoticon characters by ASCII smileys
                messages[k]['message'] = replaceSmileys(msg)
                if msg != msghtml:
                    messages[k]['message_html'] = replaceSmileys(msghtml)
            elif messages[k]['event_type'] == 'ADD_USER':
                newuserid = in_event[k]['membership_change']['participant_id'][0]['chat_id']
                newusername = retval[i]['members'].get(newuserid, 'unknown_%s' % newuserid)
                messages[k]['message'] = "added user '%s' to conversation" % newusername
            elif messages[k]['event_type'] == 'REMOVE_USER':
                newuserid = in_event[k]['membership_change']['participant_id'][0]['chat_id']
                newusername = retval[i]['members'].get(newuserid, 'unknown_%s' % newuserid)
                messages[k]['message'] = "removed user '%s' from conversation" % newusername
            elif messages[k]['event_type'] == 'SMS':
                messages[k]['message'] = ""
                content = in_event[k]['chat_message']['message_content']
                if 'segment' in content:
                    # join message segments together
                    for segment in content['segment']:
                        if 'text' not in segment:
                            continue
                        messages[k]['message'] += segment['text']
                messages[k]['message'] = replaceSmileys(messages[k]['message'])
            elif messages[k]['event_type'] == 'OTR_MODIFICATION':
                # off-the-record (history on/off) toggles carry no useful text
                messages[k]['message'] = 'unknown OTR_MODIFICATION'
            elif messages[k]['event_type'] == 'VOICEMAIL':
                messages[k]['message'] = "new voicemail:\n"
                content = in_event[k]['chat_message']['message_content']
                if 'segment' in content:
                    for segment in content['segment']:
                        if 'text' not in segment:
                            continue
                        messages[k]['message'] += segment['text']
                messages[k]['message'] = replaceSmileys(messages[k]['message'])

        # sort messages by timestamp because the export does not guarantee order;
        # bug fix: `key=` replaces the Python-2-only `cmp=` comparator
        messages.sort(key=lambda m: int(m['timestamp']))
        # add the messages array to the conversation dict
        retval[i]['messages'] = messages

    return retval
| [
"coandco@gmail.com"
] | coandco@gmail.com |
a2d5445911dce65f690596d6b01dfa4bac29a33c | 0c1d799e35f78c2d9acacb26464632231345bb4d | /In Class/stringToUpper.py | 3db28792dd57c25e0468efe4c82d841f53be16c7 | [] | no_license | MatthewSteinRWU/Computer-Vision | cc022844d1858f9b823feed2d7ba5b14aba65dfd | f1129051fe7966924b5d17ddad9e6232e0eee911 | refs/heads/master | 2020-07-12T02:56:06.892743 | 2020-01-01T23:13:51 | 2020-01-01T23:13:51 | 204,698,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | #!/usr/bin/env python
# User interaction demo
# Get a string from the user and raise it up UPPERCASE
# NOTE(review): Python 2 only (print statement, raw_input); under Python 3
# this needs print(...) and input(). The 10-30 character limit mentioned in
# the prompt is never validated — presumably intentional for this demo.
print "Please enter a string between 10 and 30 characters"
usrString = raw_input("-->")
print usrString.upper()
| [
"noreply@github.com"
] | noreply@github.com |
8aeb1300f85a1aaafb71ce05a4910eda695d01de | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_461.py | 5bdb3315298efd0854676b72294e5e643b54f60a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # How to query filter in django without multiple occurrences
# Keeps only parents that have at least one related ChildModel row; the SQL
# join can duplicate parent rows, hence the trailing .distinct().
# NOTE(review): `childmodel__in=ChildModel.objects.all()` is presumably
# equivalent to `childmodel__isnull=False` here — confirm before simplifying.
ParentModel.objects.filter(childmodel__in=ChildModel.objects.all()).distinct()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
8c8bebf2a63b9aa9dc26ad96d1b7676e2847644f | 407e350e2b698379c89e9aee18581527549cd6f7 | /strange_counter.py | 80b3ac741ae96202889d7261e82d790522b2a824 | [] | no_license | mismayil/hackerrank | df36caee7b77e7e358ae964026a23b9a47b8f7b8 | ca0f9ab6bc272c1ba8accf2d90f4d8c24179aa42 | refs/heads/master | 2020-06-26T01:16:24.472391 | 2017-05-12T20:50:41 | 2017-05-12T20:50:41 | 74,608,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | '''
Problem: https://www.hackerrank.com/challenges/strange-code
'''
import sys
import math
def strange_counter(t):
    """Return the counter's displayed value at 1-indexed time `t`.

    The counter shows 3 at t=1 and decreases by 1 each second; once it hits 1,
    the next cycle restarts at double the previous cycle's start value, so
    cycle k begins at t = 3*(2**k - 1) + 1 with value 3*(2**k).
    """
    # Bug fix: the previous closed-form (n = floor(log2(t // 3)) plus a prefix
    # sum) mis-handled cycle boundaries — e.g. it printed 7 for t=3 (correct: 1)
    # and 0 for t=10 (correct: 12). Walking cycles is simple and, with
    # t <= 1e12 in the original constraints, takes at most ~40 iterations.
    cycle_start = 1
    cycle_value = 3
    while t >= cycle_start + cycle_value:
        cycle_start += cycle_value
        cycle_value *= 2
    # Value at time t: the cycle's starting value minus the seconds elapsed
    # since that cycle began.
    return cycle_value - (t - cycle_start)


if __name__ == '__main__':
    # Read one integer time from stdin and print the counter value (same
    # command-line behavior as before; the logic now lives in a testable
    # function behind a __main__ guard).
    t = int(input().strip())
    print(strange_counter(t))
| [
"mismayilza@gmail.com"
] | mismayilza@gmail.com |
70fd281c05a5ef8edd058a4dc85a5f5d8306a24a | 1ee3dff11c22034e67f1eae4e3d11aef2d68b332 | /ex3/int_to_roman.py | 9c2b4b25c3455834f8750f62b6e72cd1c0b9511b | [] | no_license | samusdriconus/python_exercices | a4bb63f3e3cfd0982aebbe294c8b96e493f98b20 | ac4651ee4bc0eb8693c2848e65e543eb28530cac | refs/heads/master | 2021-05-19T07:02:03.093331 | 2020-03-31T23:13:53 | 2020-03-31T23:13:53 | 251,576,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | from collections import OrderedDict
def int_to_roman(num):
    """Convert a positive integer (1..3999) to its Roman-numeral string.

    Standard greedy algorithm over (value, symbol) pairs in descending order,
    including the subtractive forms (CM, CD, XC, XL, IX, IV). Returns '' for
    num <= 0, matching the original implementation's behavior.
    """
    # Descending order is what makes the greedy loop correct. This replaces
    # the previous OrderedDict + nested mutating generator, which obscured a
    # simple divmod loop.
    pairs = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    parts = []
    for value, symbol in pairs:
        count, num = divmod(num, value)
        parts.append(symbol * count)
        if num <= 0:
            break
    return "".join(parts)
if __name__ == '__main__':
    # Smoke test: 25 -> "XXV".
    print(int_to_roman(25))
"saidi-ouss@outlook.com"
] | saidi-ouss@outlook.com |
01fa6b59ed7388b26021f73ae8a0eed9453bd052 | 7e7400c9285dcc5dd1961c5865de35eda8ed2517 | /scrapy/kubahime/kubahime/settings.py | 244b22f57dd3ff7085ef73703f807d61eb071b46 | [] | no_license | leolulu/pystudy2 | 2ce861a82cf6d670197a9f1f60485a28206ebb7e | 1f929cdd08b2074a055e0b58b5dae3e52892c546 | refs/heads/master | 2021-06-25T11:04:59.396592 | 2020-11-12T15:51:40 | 2020-11-12T15:51:40 | 148,102,428 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,449 | py | # -*- coding: utf-8 -*-
# Scrapy settings for kubahime project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'kubahime'

SPIDER_MODULES = ['kubahime.spiders']
NEWSPIDER_MODULE = 'kubahime.spiders'

# Scrapy's default INFO logging is noisy; keep only warnings and above.
LOG_LEVEL = 'WARNING'
# Removed the leftover git merge-conflict markers that followed this line
# (both sides of that conflict hunk were empty) — they made this settings
# module a SyntaxError and thus unimportable.
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'kubahime (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'kubahime.middlewares.KubahimeSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# Custom middleware defined in kubahime/middlewares.py — presumably injects
# or rotates proxies on outgoing requests; TODO confirm against that module.
DOWNLOADER_MIDDLEWARES = {
    'kubahime.middlewares.ProxyMiddleware': 543,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # Resolved a leftover git merge conflict here in favor of this project's
    # own pipeline (the bot/spider modules above are 'kubahime'); the HEAD
    # side referenced 'firstscrapy.pipelines.FirstscrapyPipeline' from a
    # sibling project. The conflict markers made this module a SyntaxError.
    'kubahime.pipelines.KubahimePipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"348699103@qq.com"
] | 348699103@qq.com |
b8baecf25dc610700a99599cda99a761076542b8 | 8c487bbf9c4193c54a5a58557968b11e220cb07a | /exercicioClassificacaoMedia.py | a24a328439c79319fbb52aded9f5da88dac32461 | [] | no_license | Dex4n/algoritmos-python | c420a0b7d17946c130a5c1419bd7c81e23583f92 | 4330929a9cce9abb1ff255001dc4b7e6e251ac04 | refs/heads/master | 2021-03-16T21:49:59.598396 | 2020-03-13T13:07:24 | 2020-03-13T13:07:24 | 246,946,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py |
def classificar(media):
    """Return ``(letra, situacao)`` for a grade average, or ``None`` if out of range.

    Bands (lower bound inclusive; upper bound exclusive except for A):
      A: [9.0, 10.0]   B: [7.5, 9.0)   C: [6.0, 7.5)
      D: [4.0, 6.0)    D: [0, 4.0)

    NOTE(review): the original printed "classificação D" for BOTH of the two
    lowest bands — the [0, 4.0) band was probably meant to be "E". Kept as-is
    to preserve the original program's output; confirm the intended labels.
    """
    if 9.0 <= media <= 10.0:
        return "A", "APROVADO"
    if 7.5 <= media < 9.0:
        return "B", "APROVADO"
    if 6.0 <= media < 7.5:
        return "C", "APROVADO"
    if 4.0 <= media < 6.0:
        return "D", "REPROVADO"
    if 0 <= media < 4.0:
        return "D", "REPROVADO"
    # Outside [0, 10]: the original printed nothing for such averages.
    return None


def main():
    """Read two grades, then print them, their average and the classification."""
    nota1 = float(input("Digite o valor para a 1ª nota: "))
    nota2 = float(input("Digite o valor para a 2ª nota: "))
    media_aluno = (nota1 + nota2) / 2
    resultado = classificar(media_aluno)
    if resultado is not None:
        letra, situacao = resultado
        print("Nota 1 do aluno: %.2f" % (nota1))
        print("Nota 2 do aluno: %.2f" % (nota2))
        print("Média do aluno: %.2f" % (media_aluno))
        print("Aluno com classificação %s: %s!" % (letra, situacao))


if __name__ == "__main__":
    main()
| [
"alexandre_marino@outlook.com"
] | alexandre_marino@outlook.com |
0c4d74fc244e79ebb2b0c11a0c7f7fcf431d901f | 079c07c5d97eb60d36269e27309e84b25ea0aaeb | /guidehero-backend/app/managers/call_manager.py | 2df061c86f9dcff1932fc86ea2e7e2a95baf97e2 | [] | no_license | itdream-dev/python | 3aa44329673f05e2a86e1cba56cb88101c777233 | eda81b802b99f45933bdf0d22b508837cfa538f0 | refs/heads/master | 2023-03-05T12:27:42.776870 | 2020-05-11T15:54:45 | 2020-05-11T15:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | # -*- coding: utf-8 -*-
from config import Ivysaur
from lib.registry import get_registry
from lib.models.call import Call
from lib.push_notifications import PushNotifications
class CallManager(object):
    """Coordinates TokBox call sessions between users.

    Wires together the call/user/device repositories from the global registry,
    the TokBox client, and push notifications for incoming-call alerts.
    """

    def __init__(self):
        reg = get_registry()
        self.call_repo = reg['CALL_REPO']
        self.user_repo = reg['USER_REPO']
        self.device_repo = reg['DEVICE_REPO']
        self.tokbox = reg['TOKBOX']
        self.push_notifications = PushNotifications()

    def start_session(self, user, user_id_2):
        """Open a TokBox session from *user* to the user with id *user_id_2*.

        Records the call, pushes an incoming-call notification to the callee's
        latest device (if any), and returns the caller's connection details.
        """
        tokbox_session = self.tokbox.create_session()
        sid = tokbox_session.session_id
        access_token = self.tokbox.generate_token(sid)

        callee = self.user_repo.get_user(user_id_2)
        self.call_repo.start_session(user, callee, sid)

        callee_device = self.device_repo.get_latest_device(user_id_2)
        if callee_device:
            self.push_notifications.send_notification(
                callee_device.device_token,
                'Incoming call from %s' % user.name,
                sound='calling.caf'
            )

        return {
            'api_key': Ivysaur.Config.TOKBOX_API_KEY,
            'session_id': sid,
            'token': access_token,
        }

    def get_pending_call(self, user):
        """Return connection details for *user*'s pending call, or {} if none."""
        pending = self.call_repo.get_pending_call(user)
        if not pending:
            return {}

        sid = pending.session_id
        return {
            'api_key': Ivysaur.Config.TOKBOX_API_KEY,
            'session_id': sid,
            'token': self.tokbox.generate_token(sid),
            'caller_name': pending.caller.name,
        }

    def report_connected(self, session_id):
        """Mark the call for *session_id* as connected (only from INITIATED)."""
        call = self.call_repo.get_call_from_session_id(session_id)
        if call and call.status == Call.INITIATED:
            self.call_repo.report_connected(call)

    def report_ended(self, session_id):
        """Mark the call for *session_id* as ended (no-op if already ended)."""
        call = self.call_repo.get_call_from_session_id(session_id)
        if call and call.status != Call.ENDED:
            self.call_repo.report_ended(call)
| [
"skyclean906@gmail.com"
] | skyclean906@gmail.com |
62c2ec2abae957c33caddc0e8b9e03242a0fa3df | ea3dd300fca6b79b76ab6565c8020409e917ff9d | /crawl_movie/crawl_movie/middlewares.py | c85510322e775125dac8442d080e322b658c8c98 | [] | no_license | DanerHeart/your_project | c54dd69781a4469d353a94c76673ad12a567bc27 | 0ae449227a2b3039feb6d907f84850ff6b580a34 | refs/heads/master | 2020-03-25T22:23:28.121487 | 2018-08-12T00:17:15 | 2018-08-12T00:17:15 | 144,220,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import random
from scrapy import signals
class MyUserAgentMiddleware(UserAgentMiddleware):
'''
设置User-Agent
'''
def __init__(self, user_agent):
self.user_agent = user_agent
@classmethod
def from_crawler(cls, crawler):
return cls(
user_agent=crawler.settings.get('MY_USER_AGENT')
)
def process_request(self, request, spider):
agent = random.choice(self.user_agent)
request.headers['User-Agent'] = agent
class CrawlMovieSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class CrawlMovieDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"liu_baoxi@foxmail.com"
] | liu_baoxi@foxmail.com |
8fbf5d9f2722729e1ebeda7654e80b9bcd7ddc6e | ba8905e3d5448c600553472c580bf67b20d39448 | /HelloFlask/App/views/first_blue.py | 6136fdf13854bf518f51d093f7377cab1da8874f | [] | no_license | Jet-Morgan/Flask | d57cc3ac9fdbfdbf21a45afbc83c672a21476ac0 | 03957b66ce9e1326e203fffb60817719e9b2bacd | refs/heads/master | 2020-09-16T21:48:10.206726 | 2019-12-03T09:42:54 | 2019-12-03T09:42:54 | 223,896,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # coding=utf-8
from flask import Blueprint, render_template
from App.models import models, User
blue = Blueprint('blue', __name__)
@blue.route('/')
def index():
    """Render the blueprint's landing page."""
    message = "It is a messge from index.html"
    return render_template('index.html', msg=message)
@blue.route('/dropdb/')
def dropdb():
    """Drop every table, then report success."""
    models.drop_all()
    result = '删除成功'
    return result
@blue.route('/createdb/')
def createdb():
    """Create every table, then report success."""
    models.create_all()
    result = '创建成功'
    return result
@blue.route('/adduser/')
def add_user():
    """Create a user named "Tom" and persist it.

    NOTE(review): ``user.save()`` presumably wraps ``session.add`` +
    ``session.commit`` — the commented-out session calls in the original
    suggested as much; confirm against the model's save() implementation.
    (The dead commented-out code has been removed.)
    """
    user = User()
    user.username = "Tom"
    user.save()
    return '增加创建成功'
"15182948100@163.com"
] | 15182948100@163.com |
b6cac6e6a6498a5ee259af5f31f06bb09bf480ed | 4106383e1bc1ec191476ad8b59a0ed5bbe2b46e8 | /subm/data/s_loss_1.41586900332_r_64_c_64_folds_13_ep_50_2016-05-06-21-32_code.py | 5ac5a59990e80022bcdea4135c19e9a8cd53003a | [] | no_license | NickStupich/state_farm_neural_net | cfd70928e58af88ec9255219c5702b68391ed583 | acfc7d30001d0597ad6f76be3bd1f51724fe8e89 | refs/heads/master | 2021-06-01T00:44:00.690734 | 2016-08-07T06:40:03 | 2016-08-07T06:40:03 | 56,463,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,281 | py | # -*- coding: utf-8 -*-
import numpy as np
np.random.seed(2016)
import os
import glob
import cv2
import math
import pickle
import datetime
import pandas as pd
import statistics
import time
from shutil import copy2
import warnings
import random
warnings.filterwarnings("ignore")
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import GaussianNoise
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import np_utils
from keras.models import model_from_json
from sklearn.metrics import log_loss
from scipy.misc import imread, imresize, imshow
use_cache = 1
def show_image(im, name='image'):
cv2.imshow(name, im)
cv2.waitKey(0)
cv2.destroyAllWindows()
# color_type = 1 - gray
# color_type = 3 - RGB
def get_im_cv2(path, img_rows, img_cols, color_type=1):
# Load as grayscale
if color_type == 1:
img = cv2.imread(path, 0)
elif color_type == 3:
img = cv2.imread(path)
# Reduce size
resized = cv2.resize(img, (img_cols, img_rows), cv2.INTER_LINEAR)
return resized
def get_im_cv2_mod(path, img_rows, img_cols, color_type=1):
# Load as grayscale
if color_type == 1:
img = cv2.imread(path, 0)
else:
img = cv2.imread(path)
# Reduce size
rotate = random.uniform(-10, 10)
M = cv2.getRotationMatrix2D((img.shape[1]/2, img.shape[0]/2), rotate, 1)
img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
resized = cv2.resize(img, (img_cols, img_rows), cv2.INTER_LINEAR)
return resized
def get_driver_data():
dr = dict()
clss = dict()
path = os.path.join('driver_imgs_list.csv')
print('Read drivers data')
f = open(path, 'r')
line = f.readline()
while (1):
line = f.readline()
if line == '':
break
arr = line.strip().split(',')
dr[arr[2]] = arr[0]
if arr[0] not in clss.keys():
clss[arr[0]] = [(arr[1], arr[2])]
else:
clss[arr[0]].append((arr[1], arr[2]))
f.close()
return dr, clss
def load_train(img_rows, img_cols, color_type=1):
X_train = []
X_train_id = []
y_train = []
driver_id = []
start_time = time.time()
driver_data, dr_class = get_driver_data()
print('Read train images')
for j in range(10):
print('Load folder c{}'.format(j))
path = os.path.join('train', 'c' + str(j), '*.jpg')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
# img = get_im_cv2(fl, img_rows, img_cols, color_type)
img = get_im_cv2_mod(fl, img_rows, img_cols, color_type)
X_train.append(img)
X_train_id.append(flbase)
y_train.append(j)
driver_id.append(driver_data[flbase])
print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))
unique_drivers = sorted(list(set(driver_id)))
print('Unique drivers: {}'.format(len(unique_drivers)))
print(unique_drivers)
return X_train, y_train, X_train_id, driver_id, unique_drivers
def load_test(img_rows, img_cols, color_type=1):
print('Read test images')
path = os.path.join('test', '*.jpg')
files = glob.glob(path)
X_test = []
X_test_id = []
total = 0
start_time = time.time()
thr = math.floor(len(files)/10)
for fl in files:
flbase = os.path.basename(fl)
# img = get_im_cv2(fl, img_rows, img_cols, color_type)
img = get_im_cv2_mod(fl, img_rows, img_cols, color_type)
X_test.append(img)
X_test_id.append(flbase)
total += 1
if total%thr == 0:
print('Read {} images from {}'.format(total, len(files)))
print('Read test data time: {} seconds'.format(round(time.time() - start_time, 2)))
return X_test, X_test_id
def cache_data(data, path):
if os.path.isdir(os.path.dirname(path)):
file = open(path, 'wb')
pickle.dump(data, file)
file.close()
else:
print('Directory doesnt exists')
def restore_data(path):
data = dict()
if os.path.isfile(path):
file = open(path, 'rb')
data = pickle.load(file)
return data
def save_model(model, arch_path, weights_path):
json_string = model.to_json()
if not os.path.isdir('cache'):
os.mkdir('cache')
open(arch_path, 'w').write(json_string)
model.save_weights(weights_path, overwrite=True)
def read_model(arch_path, weights_path):
model = model_from_json(open(arch_path).read())
model.load_weights(weights_path)
return model
def split_validation_set(train, target, test_size):
random_state = 51
X_train, X_test, y_train, y_test = train_test_split(train, target, test_size=test_size, random_state=random_state)
return X_train, X_test, y_train, y_test
def create_submission(predictions, test_id, info):
result1 = pd.DataFrame(predictions, columns=['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])
result1.loc[:, 'img'] = pd.Series(test_id, index=result1.index)
now = datetime.datetime.now()
if not os.path.isdir('subm'):
os.mkdir('subm')
suffix = info + '_' + str(now.strftime("%Y-%m-%d-%H-%M"))
sub_file = os.path.join('subm', 'submission_' + suffix + '.csv')
result1.to_csv(sub_file, index=False)
def save_useful_data(predictions_valid, valid_ids, model, info):
result1 = pd.DataFrame(predictions_valid, columns=['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])
result1.loc[:, 'img'] = pd.Series(valid_ids, index=result1.index)
now = datetime.datetime.now()
if not os.path.isdir(os.path.join('subm', 'data')):
os.mkdir(os.path.join('subm', 'data'))
suffix = info + '_' + str(now.strftime("%Y-%m-%d-%H-%M"))
# Save predictions
pred_file = os.path.join('subm', 'data', 's_' + suffix + '_train_predictions.csv')
result1.to_csv(pred_file, index=False)
# Save model
json_string = model.to_json()
model_file = os.path.join('subm', 'data', 's_' + suffix + '_model.json')
open(model_file, 'w').write(json_string)
# Save code
cur_code = os.path.realpath(__file__)
code_file = os.path.join('subm', 'data', 's_' + suffix + '_code.py')
copy2(cur_code, code_file)
def read_and_normalize_train_data(img_rows, img_cols, color_type=1):
cache_path = os.path.join('cache', 'train_r_' + str(img_rows) + '_c_' + str(img_cols) + '_t_' + str(color_type) + '_rotated.dat')
if not os.path.isfile(cache_path) or use_cache == 0:
train_data, train_target, train_id, driver_id, unique_drivers = load_train(img_rows, img_cols, color_type)
cache_data((train_data, train_target, train_id, driver_id, unique_drivers), cache_path)
else:
print('Restore train from cache!')
(train_data, train_target, train_id, driver_id, unique_drivers) = restore_data(cache_path)
train_data = np.array(train_data, dtype=np.uint8)
train_target = np.array(train_target, dtype=np.uint8)
if color_type == 1:
train_data = train_data.reshape(train_data.shape[0], 1, img_rows, img_cols)
else:
train_data = train_data.transpose((0, 3, 1, 2))
train_target = np_utils.to_categorical(train_target, 10)
train_data = train_data.astype('float32')
train_data /= 255
print('Train shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
return train_data, train_target, train_id, driver_id, unique_drivers
def read_and_normalize_test_data(img_rows, img_cols, color_type=1):
cache_path = os.path.join('cache', 'test_r_' + str(img_rows) + '_c_' + str(img_cols) + '_t_' + str(color_type) + '_rotated.dat')
if not os.path.isfile(cache_path) or use_cache == 0:
test_data, test_id = load_test(img_rows, img_cols, color_type)
cache_data((test_data, test_id), cache_path)
else:
print('Restore test from cache!')
(test_data, test_id) = restore_data(cache_path)
test_data = np.array(test_data, dtype=np.uint8)
if color_type == 1:
test_data = test_data.reshape(test_data.shape[0], 1, img_rows, img_cols)
else:
test_data = test_data.transpose((0, 3, 1, 2))
# test_data = test_data.swapaxes(3, 1)
test_data = test_data.astype('float32')
test_data /= 255
print('Test shape:', test_data.shape)
print(test_data.shape[0], 'test samples')
return test_data, test_id
def merge_several_folds_mean(data, nfolds):
a = np.array(data[0])
for i in range(1, nfolds):
a += np.array(data[i])
a /= nfolds
return a.tolist()
def merge_several_folds_geom(data, nfolds):
a = np.array(data[0])
for i in range(1, nfolds):
a *= np.array(data[i])
a = np.power(a, 1/nfolds)
return a.tolist()
def copy_selected_drivers(train_data, train_target, driver_id, driver_list):
data = []
target = []
index = []
for i in range(len(driver_id)):
if driver_id[i] in driver_list:
data.append(train_data[i])
target.append(train_target[i])
index.append(i)
data = np.array(data)
target = np.array(target)
index = np.array(index)
return data, target, index
printedSummary=False
def create_model_v1(img_rows, img_cols, color_type=1):
global printedSummary
model = Sequential()
# model.add(GaussianNoise(0.05, input_shape=(color_type, img_rows, img_cols)))
model.add(Convolution2D(32, 3, 3, border_mode='same', init='he_normal',
input_shape=(color_type, img_rows, img_cols)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Convolution2D(64, 3, 3, border_mode='same', init='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Convolution2D(128, 3, 3, border_mode='same', init='he_normal'))
model.add(MaxPooling2D(pool_size=(8, 8)))
model.add(Dropout(0.5))
# model.add(Convolution2D(256, 3, 3, border_mode='same', init='he_normal'))
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Dropout(0.5))
model.add(Flatten())
# model.add(Dense(50))
# model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
if not printedSummary:
model.summary()
printedSummary = True
model.compile(Adam(lr=1e-3), loss='categorical_crossentropy')
return model
def get_validation_predictions(train_data, predictions_valid):
pv = []
for i in range(len(train_data)):
pv.append(predictions_valid[i])
return pv
def run_cross_validation(nfolds=10):
# input image dimensions
img_rows, img_cols = 64, 64
# color type: 1 - grey, 3 - rgb
color_type_global = 1
batch_size = 64
nb_epoch = 50
random_state = 51
restore_from_last_checkpoint = 0
train_data, train_target, train_id, driver_id, unique_drivers = read_and_normalize_train_data(img_rows, img_cols, color_type_global)
test_data, test_id = read_and_normalize_test_data(img_rows, img_cols, color_type_global)
yfull_train = dict()
yfull_test = []
kf = KFold(len(unique_drivers), n_folds=nfolds, shuffle=True, random_state=random_state)
num_fold = 0
sum_score = 0
for train_drivers, test_drivers in kf:
model = create_model_v1(img_rows, img_cols, color_type_global)
unique_list_train = [unique_drivers[i] for i in train_drivers]
X_train, Y_train, train_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_train)
unique_list_valid = [unique_drivers[i] for i in test_drivers]
X_valid, Y_valid, test_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_valid)
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
print('Train drivers: ', unique_list_train)
print('Test drivers: ', unique_list_valid)
kfold_weights_path = os.path.join('cache', 'weights_kfold_' + str(num_fold) + '.h5')
if not os.path.isfile(kfold_weights_path) or restore_from_last_checkpoint == 0:
callbacks = [
EarlyStopping(monitor='val_loss', patience=1, verbose=0),
ModelCheckpoint(kfold_weights_path, monitor='val_loss', save_best_only=True, verbose=0),
]
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
callbacks=callbacks)
if os.path.isfile(kfold_weights_path):
model.load_weights(kfold_weights_path)
# score = model.evaluate(X_valid, Y_valid, show_accuracy=True, verbose=0)
# print('Score log_loss: ', score[0])
predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
score = log_loss(Y_valid, predictions_valid)
print('Score log_loss: ', score)
sum_score += score*len(test_index)
# Store valid predictions
for i in range(len(test_index)):
yfull_train[test_index[i]] = predictions_valid[i]
# Store test predictions
test_prediction = model.predict(test_data, batch_size=batch_size, verbose=1)
yfull_test.append(test_prediction)
score = sum_score/len(train_data)
print("Log_loss train independent avg: ", score)
predictions_valid = get_validation_predictions(train_data, yfull_train)
score1 = log_loss(train_target, predictions_valid)
if abs(score1 - score) > 0.0001:
print('Check error: {} != {}'.format(score, score1))
print('Final log_loss: {}, rows: {} cols: {} nfolds: {} epoch: {}'.format(score, img_rows, img_cols, nfolds, nb_epoch))
info_string = 'loss_' + str(score) \
+ '_r_' + str(img_rows) \
+ '_c_' + str(img_cols) \
+ '_folds_' + str(nfolds) \
+ '_ep_' + str(nb_epoch)
test_res = merge_several_folds_mean(yfull_test, nfolds)
# test_res = merge_several_folds_geom(yfull_test, nfolds)
create_submission(test_res, test_id, info_string)
save_useful_data(predictions_valid, train_id, model, info_string)
def run_single():
# input image dimensions
img_rows, img_cols = 64, 64
color_type_global = 1
batch_size = 32
nb_epoch = 50
random_state = 51
train_data, train_target, train_id, driver_id, unique_drivers = read_and_normalize_train_data(img_rows, img_cols, color_type_global)
test_data, test_id = read_and_normalize_test_data(img_rows, img_cols, color_type_global)
yfull_test = []
unique_list_train = ['p002', 'p012', 'p014', 'p015', 'p016', 'p021', 'p022', 'p035', 'p041', 'p042', 'p045', 'p047', 'p049', 'p050', 'p051', 'p052', 'p056', 'p061', 'p064', 'p066', 'p075', 'p081']
X_train, Y_train, train_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_train)
unique_list_valid = ['p024', 'p026', 'p039', 'p072']
X_valid, Y_valid, test_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_valid)
print('Start Single Run')
print('Split train: ', len(X_train))
print('Split valid: ', len(X_valid))
print('Train drivers: ', unique_list_train)
print('Valid drivers: ', unique_list_valid)
callbacks = [
EarlyStopping(monitor='val_loss', patience=2, verbose=0),
]
model = create_model_v1(img_rows, img_cols, color_type_global)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
callbacks=callbacks)
# score = model.evaluate(X_valid, Y_valid, show_accuracy=True, verbose=0)
# print('Score log_loss: ', score[0])
predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
score = log_loss(Y_valid, predictions_valid)
print('Score log_loss: ', score)
# Store test predictions
test_prediction = model.predict(test_data, batch_size=batch_size, verbose=1)
yfull_test.append(test_prediction)
print('Final log_loss: {}, rows: {} cols: {} epoch: {}'.format(score, img_rows, img_cols, nb_epoch))
info_string = 'loss_' + str(score) \
+ '_r_' + str(img_rows) \
+ '_c_' + str(img_cols) \
+ '_ep_' + str(nb_epoch)
full_pred = model.predict(train_data, batch_size=batch_size, verbose=1)
score = log_loss(train_target, full_pred)
print('Full score log_loss: ', score)
test_res = merge_several_folds_mean(yfull_test, 1)
create_submission(test_res, test_id, info_string)
save_useful_data(full_pred, train_id, model, info_string)
if __name__ == '__main__':
run_cross_validation(13)
# run_single() | [
"nick.stupich@gmail.com"
] | nick.stupich@gmail.com |
aa021563c62bd697794e0ff8f581e92b7cdbd047 | 47b9b617c9ac0de925c29368ae57e0a986bcbb02 | /DataStructures/LinkedList/ReverseLinkedList.py | 27596a804f51d7dc5b460788c9af3b43e1f3e966 | [] | no_license | YuliangZHENG6/hackerrank | 71b24100a151d7a1693cb71b602034a9b10da6b6 | 55a38303eadf3e423c84cafc1c51d72faed44ba4 | refs/heads/master | 2021-08-16T20:32:38.788273 | 2017-11-20T09:37:25 | 2017-11-20T09:37:25 | 107,589,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | """
Reverse a linked list
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
return back the head of the linked list in the below method.
"""
def Reverse(head):
    """Reverse a singly linked list in place and return the new head.

    Works on any node object exposing a ``next`` attribute. The empty list
    (``head is None``) and the single-node list fall out of the loop
    naturally, so the explicit special cases in the original were redundant.
    Runs in O(n) time and O(1) extra space.
    """
    prev = None
    curr = head
    while curr is not None:
        nxt = curr.next   # remember the rest of the list before relinking
        curr.next = prev  # point this node backwards
        prev = curr
        curr = nxt
    return prev
"yuliang.zheng@epfl.ch"
] | yuliang.zheng@epfl.ch |
dcd05b317337bac479b22dcaea4f461603eaa11b | 02e23da0431623db86c8138bda350a1d526d4185 | /Archivos Python Documentos/Graficas/.history/matriz_20200222132010.py | 8534cb3045cb3b356fb2e42fe4a210b62a5a9f3b | [] | no_license | Jaamunozr/Archivos-python | d9996d3d10ff8429cd1b4c2b396016a3a5482889 | 1f0af9ba08f12ac27e111fcceed49bbcf3b39657 | refs/heads/master | 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import numpy as np
import os
import pylab as pl
import matplotlib.pyplot as plt
os.system("clear")  # clear the terminal before printing

# 2x2 matrix as a nested list. The original used MATLAB-style row syntax
# ("12, 23; 34, 34"), which is a SyntaxError in Python; the semicolon row
# separator has been translated to nested-list rows.
g = [
    [12, 23],
    [34, 34],
]
print(g)
"""
raiz=np.sqrt
ln=np.log
X = np.arange(-2, 12, 0.1)
Y = np.arange(-2, 12, 0.1)
J=np.count_nonzero(Y)
print (J)
a = [0] * J
for i in range(J):
a[i] = Y[i]
X[25]=0.49
X[65]=4.49
X[105]=8.49
Y[25]=0.49
Y[65]=4.49
Y[105]=8.49
ax, ay = 0.5, 0.5
bx, by = 4.5, 0.4
cx, cy = 8.5, 0.5
dx, dy = 0.5, 4.5
ex, ey = 8.5, 4.5
fx, fy = 0.5, 8.5
gx, gy = 4.5, 8.5
hx, hy = 8.5, 8.5
l = 2
rho= 100
ik=25
ma=raiz((X-ax)**2+(Y-ay)**2)
mb=raiz((X-bx)**2+(Y-by)**2)
mc=raiz((X-cx)**2+(Y-cy)**2)
md=raiz((X-dx)**2+(Y-dy)**2)
me=raiz((X-ex)**2+(Y-ey)**2)
mf=raiz((X-fx)**2+(Y-fy)**2)
mg=raiz((X-gx)**2+(Y-gy)**2)
mh=raiz((X-hx)**2+(Y-hy)**2)
va=ln((l+raiz(ma**2+l**2))/ma)
vb=ln((l+raiz(mb**2+l**2))/mb)
vc=ln((l+raiz(mc**2+l**2))/mc)
vd=ln((l+raiz(md**2+l**2))/md)
ve=ln((l+raiz(me**2+l**2))/me)
vf=ln((l+raiz(mf**2+l**2))/mf)
vg=ln((l+raiz(mg**2+l**2))/mg)
vh=ln((l+raiz(mh**2+l**2))/mh)
Vt=((rho*ik)/(2*np.pi))*(va+vb+vc+vd+ve+vf+vg+vh)
print (Vt[::].max())
print(type(Vt))
print(Vt.shape)
plt.figure(figsize=(X,Y))
plt.imshow(Vt, cmap = "summer")
plt.colorbar(
plt.show()
)""" | [
"jaamunozr@gmail.com"
] | jaamunozr@gmail.com |
efb22923988475abe76ca7e439a4aa6b18d00cd7 | 980bf0a9af6a7c9fd41cd042ddce9cb45d7ffd6f | /taobao_scrapy/taobao_scrapy/spiders/taobao.py | bed780dfae926e595b319505b5a25375842ae629 | [] | no_license | 406410672/tb_s | 8dc73c520e762b40d001a376b763207eda42f886 | 231845a77f0cbd23b73ee5e430047f0479af4f05 | refs/heads/master | 2020-03-12T21:40:39.043364 | 2018-05-23T10:04:37 | 2018-05-23T10:04:37 | 130,832,977 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,584 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/18 16:43
# @Author : HT
# @Site :
# @File : taobao.py
# @Software: PyCharm Community Edition
# @Describe: Desc
# @Issues : Issues
import sys
import os
import scrapy
ROOT_PATH = os.path.join(os.path.realpath(__file__), os.pardir)
sys.path.append(ROOT_PATH)
import re
from lxml import etree
from datetime import datetime
from scrapy.http import Request
from taobao_scrapy.MyItems.items import (TaobaoCategoryItem,
TaoBaolistsrpItem, TaoBaospulistItem, TaoBaomainsrpItem, TaoBaospudetailItem)
from taobao_scrapy.BaseModule.TaobaoParse import TaobaoParse, TaobaoItemDetailParse
from taobao_scrapy.BaseModule.HTLogger import HTLogger
from taobao_scrapy.Exceptions.ParserException import TaoBaoItemParserException
from taobao_scrapy.Util.StrHandle import *
class TaobaoSpider(scrapy.Spider):
name = 'TaobaoSpider'
allowed_domains = ['taobao.com']
root_url = 'https://www.taobao.com/markets/tbhome/market-list'
logger = HTLogger('taobao.log')
nav_cat_key_set = set()
def start_requests(self):
yield Request(self.root_url, self.parse)
return
def parse(self, response):
print(response.url)
tree = etree.HTML(response.text)
# x = '//*[text()="家电办公" or text()="手机数码" or text()="护肤彩妆"]'
# xpath_list = ['//*[text()="家电办公"]', '//*[text()="手机数码"]', '//*[text()="护肤彩妆"]']
# xpath_list = ['//*[text()="手机数码"]']
xpath_dict = {'手机数码': '//*[text()="手机数码"]',
'家电办公': '//*[text()="家电办公"]',
'护肤彩妆': '//*[text()="护肤彩妆"]',
}
xpath_dict = {
'手机数码': '//*[text()="手机数码"]',
'家电办公': '//*[text()="家电办公"]',
'珠宝配饰' : '//*[text()="珠宝配饰"]',
'护肤彩妆': '//*[text()="护肤彩妆"]',
}
for k,v in xpath_dict.items():
e_tree = tree.xpath(v)
category_list = list()
for element in e_tree:
sub_elements = element.xpath('../ul/li')
for sub_element in sub_elements:
p_category_name = sub_element.xpath('./a/text()')[0]
category_names_result = sub_element.xpath('./div/*[@class="category-name"]/text()')
category_urls_result = sub_element.xpath('./div/*[@class="category-name"]/@href')
for i in range(len(category_urls_result)):
category_name = category_names_result[i]
url = category_urls_result[i]
complate_category_name = '{}:{}:{}'.format(k, p_category_name, category_name)
category_list.append({'category_name': complate_category_name, 'category_url': url})
i = 0
for category in category_list:
i += 1
item = TaobaoCategoryItem()
c_n = category['category_name']
c_url = category['category_url']
insert_date = datetime.now()
item['category_name'] = c_n
item['category_url'] = c_url
item['insert_date'] = insert_date
yield item
url = 'https:' + c_url
if 'kuaicai' in url:
pass
else:
test_url = 'https://s.taobao.com/list?q=%E6%82%A6%E8%AF%97%E9%A3%8E%E5%90%9F&cat=1801%2C50071436%2C50010788%3B50011977%3B50011981%3B50011977%3B50011981%3B50011979%3B50011979%3B50011979%3B50011978%3B50011979%3B50011977&style=grid&seller_type=taobao&spm=a219r.lm843.1000187.1'
yield Request(url=test_url, callback=self.parse_content, meta={'category_name': c_n, 'category_url':c_url})
# yield Request(url='https:' + c_url, callback=self.parse_content,
# meta={'category_name': c_n, 'category_url': c_url})
#
return
    def parse_content(self, response):
        """Parse one Taobao search/list result page.

        Dispatches on the page's ``pageName`` (spudetail / mainsrp / listsrp /
        spulist) embedded in the ``g_page_config`` JS object: emits one item of
        the matching type, then schedules follow-up requests for pagination
        (via the ``s`` offset query parameter) and — for result sets larger
        than Taobao's page cap — for narrower sub-category filters.

        :param response: scrapy Response; ``response.meta`` must carry
            ``category_name`` and ``category_url``; ``category_name_level_2``
            is optional (set for SPU detail pages).
        :yields: Taobao*Item objects and Request objects back to this method.
        """
        print('当前key:{}'.format(self.nav_cat_key_set))
        meta = response.meta
        category_name = meta['category_name']
        category_url = meta['category_url']
        content = response.text
        request_url = response.url
        # g_page_config is the JSON blob Taobao embeds in the page's script tag.
        g_page_config = TaobaoParse.get_page_config(content)
        page_name = g_page_config.get('pageName')
        insert_date = datetime.now()
        # Pager metadata (pageSize / totalCount / currentPage / totalPage).
        data_info = g_page_config.get('mods').get('sortbar').get('data').get('pager')
        if page_name == 'spudetail':
            # SPU detail listing: emit the auctions and paginate.
            data_list = g_page_config.get('mods').get('itemlist').get('data').get('auctions')
            item = TaoBaospudetailItem()
            category_name_level_2 = meta.get('category_name_level_2')
            item['category_name_level_2'] = category_name_level_2
            item['category_name'] = category_name
            item['category_url'] = category_url
            item['insert_date'] = insert_date
            item['request_url'] = request_url
            item['page_name'] = page_name
            item['data_info'] = data_info
            item['data_list'] = data_list
            yield item
            if data_info != None:
                page_size = data_info.get('pageSize')
                totalCount = data_info.get('totalCount')
                current_page = data_info.get('currentPage')
                print('category_name:{}'.format(category_name))
                print('category_name_level_2:{}'.format(category_name_level_2))
                print('page_name:{}'.format(page_name))
                print('page_size:{}'.format(page_size))
                print('totalCount:{}'.format(totalCount))
                print('current_page:{}'.format(current_page))
                # More results remain: advance the 's' offset (items skipped).
                if int(current_page) * int(page_size) < int(totalCount):
                    og_url = response.url
                    s_value_list = re.findall('&(s=\d*)', og_url)
                    if len(s_value_list) == 0 and int(current_page) == 1:
                        new_url = og_url+'&s=60'
                    else:
                        new_url = og_url.replace(s_value_list[0], 's=%d'%(int(page_size)*int(current_page)))
                    print('新的url:{}'.format(new_url))
                    yield Request(url=new_url, callback=self.parse_content, meta={'category_name': category_name, 'category_url':category_url, 'category_name_level_2': category_name_level_2})
        elif page_name == 'mainsrp':
            data_list = g_page_config.get('mods').get('itemlist').get('data').get('auctions')
            item = TaoBaomainsrpItem()
            item['category_name'] = category_name
            item['category_url'] = category_url
            item['insert_date'] = insert_date
            item['request_url'] = request_url
            item['page_name'] = page_name
            item['data_info'] = data_info
            item['data_list'] = data_list
            yield item
            og_url = response.url
            # Classify first: if the page count exceeds the cap, split the
            # query by sub-category before paginating.
            # When key == 'path' the filter values accumulate (';'-joined);
            # when key == 'cat' the filter value is replaced.
            # Only then handle pagination.
            if data_info != None:
                page_size = data_info.get('pageSize')
                totalCount = data_info.get('totalCount')
                current_page = data_info.get('currentPage')
                total_page = data_info.get('totalPage')
                print('category_name:{}'.format(category_name))
                print('page_name:{}'.format(page_name))
                print('page_size:{}'.format(page_size))
                print('totalCount:{}'.format(totalCount))
                print('current_page:{}'.format(current_page))
                print('total_page:{}'.format(total_page))
                max_totalpage = 90
                if int(total_page) > max_totalpage:
                    # NOTE(review): the format string is missing a '{}' after
                    # 'url:' — og_url is silently dropped from this message.
                    self.logger.error('url: \n页面数量大于{},该页面需要添加分类'.format(og_url, max_totalpage))
                    try:
                        # First nav facet group is used to split the query.
                        nav_category_list = g_page_config.get('mods').get('nav').get('data').get('common')
                        max_category_item = nav_category_list[0]
                        if max_category_item != None:
                            max_category_subs = max_category_item.get('sub')
                            for category_sub in max_category_subs:
                                key = category_sub['key']
                                value = category_sub['value']
                                # Shared spider state; presumably tracks seen
                                # nav keys for debugging — confirm.
                                self.nav_cat_key_set.add(key)
                                new_url = None
                                if key in og_url:
                                    try:
                                        re_regex = "&({}=[^&]*)".format(key)
                                        print('re_regex =%s' % (re_regex))
                                        find_parm = re.findall(re_regex, og_url)
                                        find_parm = find_parm[0]
                                        # Append the new value to the existing filter.
                                        new_parm = '%s;%s'%(find_parm, value)
                                        new_url = og_url.replace(find_parm, new_parm)
                                        print('{}下 新的分类url:{}'.format(find_parm,new_url))
                                    except Exception as error:
                                        self.logger.error('正则表达式没有找到url {}'.format(error))
                                else:
                                    new_url = og_url + '&{}={}'.format(key, value)
                                    self.logger.debug('新的分类url:{}'.format(new_url))
                                yield Request(url=new_url, callback=self.parse_content,
                                              meta={'category_name': category_name, 'category_url': category_url})
                    except Exception as error:
                        self.logger.error('获取分类失败与分页处理失败 :{}'.format(error))
                        return
                elif int(current_page) < int(total_page):
                    # Page count within the cap: just paginate via 's' offset.
                    print('处理分页数据!')
                    og_url = response.url
                    s_value_list = re.findall('&(s=\d*)', og_url)
                    if len(s_value_list) == 0 and int(current_page) == 1:
                        new_url = og_url+'&s=60'
                    else:
                        new_url = og_url.replace(s_value_list[0], 's=%d'%(int(page_size)*int(current_page)))
                    print('下一页:{}'.format(new_url))
                    yield Request(url=new_url, callback=self.parse_content, meta={'category_name': category_name, 'category_url':category_url})
        elif page_name == 'listsrp':
            data_list = g_page_config.get('mods').get('itemlist').get('data').get('auctions')
            item = TaoBaolistsrpItem()
            item['category_name'] = category_name
            item['category_url'] = category_url
            item['insert_date'] = insert_date
            item['request_url'] = request_url
            item['page_name'] = page_name
            item['data_info'] = data_info
            item['data_list'] = data_list
            yield item
            # for taobao_item in data_list:
            #     url = taobao_item.get('detail_url')
            #     url = 'https:' + url
            #     request = Request(url=url, callback=self.parser_item_detail)
            #     yield request
            og_url = response.url
            # Classify first: if the page count exceeds the cap, split the
            # query by sub-category before paginating.
            # When key == 'path' the filter values accumulate (';'-joined);
            # when key == 'cat' the filter value is replaced.
            # Only then handle pagination.
            if data_info != None:
                page_size = data_info.get('pageSize')
                totalCount = data_info.get('totalCount')
                current_page = data_info.get('currentPage')
                total_page = data_info.get('totalPage')
                self.logger.debug('category_name:{}'.format(category_name))
                self.logger.debug('page_name:{}'.format(page_name))
                self.logger.debug('page_size:{}'.format(page_size))
                self.logger.debug('totalCount:{}'.format(totalCount))
                self.logger.debug('current_page:{}'.format(current_page))
                self.logger.debug('total_page:{}'.format(total_page))
                max_totalpage = 95
                if int(total_page) > max_totalpage:
                    self.logger.error('url:{} \n页面数量大于{},该页面需要添加分类'.format(og_url, max_totalpage))
                    try:
                        nav_category_list = g_page_config.get('mods').get('nav').get('data').get('common')
                        max_category_item = nav_category_list[0]
                        if max_category_item != None:
                            max_category_subs = max_category_item.get('sub')
                            for category_sub in max_category_subs:
                                key = category_sub['key']
                                value = category_sub['value']
                                self.nav_cat_key_set.add(key)
                                new_url = None
                                re_regex = "&({}=[^&]*)".format(key)
                                if key == 'cat':
                                    # 'cat' filters replace the existing value.
                                    try:
                                        self.logger.debug('re_regex =%s' % (re_regex))
                                        find_parm = re.findall(re_regex, og_url)
                                        self.logger.debug('find_parm %s' % (find_parm))
                                        find_parm = find_parm[0]
                                        new_parm = '%s=%s' % (key, value)
                                        new_url = og_url.replace(find_parm, new_parm)
                                        self.logger.debug('{}下 新的分类url:{}'.format(find_parm, new_url))
                                    except Exception as error:
                                        self.logger.error('正则表达式没有找到url {}'.format(error))
                                else:
                                    # Other filter keys accumulate with ';'.
                                    if key in og_url:
                                        try:
                                            self.logger.debug('re_regex =%s' % (re_regex))
                                            find_parm = re.findall(re_regex, og_url)
                                            self.logger.debug('find_parm %s' % (find_parm))
                                            find_parm = find_parm[0]
                                            new_parm = '%s;%s' % (find_parm, value)
                                            new_url = og_url.replace(find_parm, new_parm)
                                            self.logger.debug('{}下 新的分类url:{}'.format(find_parm, new_url))
                                        except Exception as error:
                                            self.logger.error('正则表达式没有找到url {}'.format(error))
                                    else:
                                        new_url = og_url + '&{}={}'.format(key, value)
                                        self.logger.debug('新的分类url:{}'.format(new_url))
                                # print('cat_key_set:{}'.format(self.nav_cat_key_set))
                                yield Request(url=new_url, callback=self.parse_content,
                                              meta={'category_name': category_name, 'category_url': category_url})
                            return
                    except Exception as error:
                        self.logger.error('获取分类失败与分页处理失败 :{}'.format(error))
                # if have_error == False:
                #     return
                if int(current_page) * int(page_size) < int(totalCount):
                    og_url = response.url
                    s_value_list = re.findall('&(s=\d*)', og_url)
                    if len(s_value_list) == 0 and int(current_page) == 1:
                        new_url = og_url+'&s=60'
                    else:
                        new_url = og_url.replace(s_value_list[0], 's=%d'%(int(page_size)*int(current_page)))
                    self.logger.debug('下一页:{}'.format(new_url))
                    yield Request(url=new_url, callback=self.parse_content, meta={'category_name': category_name, 'category_url':category_url})
        elif page_name == 'spulist':
            # SPU grid page: emit the SPUs, paginate, then fan out into each SPU.
            data_list = g_page_config.get('mods').get('grid').get('data').get('spus')
            item = TaoBaospulistItem()
            item['category_name'] = category_name
            item['category_url'] = category_url
            item['insert_date'] = insert_date
            item['request_url'] = request_url
            item['page_name'] = page_name
            item['data_info'] = data_info
            item['data_list'] = data_list
            yield item
            # Request every page of this SPU grid.
            if data_info != None:
                page_size = data_info.get('pageSize')
                totalCount = data_info.get('totalCount')
                current_page = data_info.get('currentPage')
                print('category_name:{}'.format(category_name))
                print('page_name:{}'.format(page_name))
                print('page_size:{}'.format(page_size))
                print('totalCount:{}'.format(totalCount))
                print('current_page:{}'.format(current_page))
                if int(current_page) * int(page_size) < int(totalCount):
                    og_url = response.url
                    s_value_list = re.findall('&(s=\d*)', og_url)
                    if len(s_value_list) == 0 and int(current_page) == 1:
                        new_url = og_url+'&s=50'
                    else:
                        new_url = og_url.replace(s_value_list[0], 's=%d'%(int(page_size)*int(current_page)))
                    print('新的分页url:{}'.format(new_url))
                    yield Request(url=new_url, callback=self.parse_content, meta={'category_name': category_name, 'category_url':category_url})
            # All listings under each product (SPU).
            for data in data_list:
                new_url = data.get('url')
                new_url = 'https:'+new_url
                print('spulist 下的新请求的url:{}'.format(new_url))
                yield Request(url=new_url, callback=self.parse_content, meta={'category_name': category_name,'category_name_level_2': data.get('title'), 'category_url':category_url})
        return
def parser_item_detail(self, response):
content = response.text
url = response.url
file_name = re.findall(r'id=(\d*)&', url)[0]
file_name = os.path.join('web', file_name)
file = open(file_name, mode='w', encoding='unicode_escape')
# with open(url.replace('/','_'), mode='w', encoding='unicode_escape') as file:
file.write(content)
file.close()
self.logger.debug(file_name)
self.logger.debug('写入完毕')
self.logger.debug('parser_item_detail')
self.logger.debug(response.url)
config = TaobaoItemDetailParse.get_item_config(content)
self.logger.debug(config) | [
"18316551437@163.com"
] | 18316551437@163.com |
b06dea7eb98b6918a57396efd4774805d8c1fe4e | 080807483ab794f1ad3974a41341454f9939075d | /Flask/flaskr/models.py | 6b06dd0ce32839c5df1e1ed14df28ffb3681d828 | [] | no_license | maru919/ramen_fortune | 8adb0fccd39dd6933942e6048a5a339b9f88ac22 | 9bfbec65cafb71f9034dcf0284fd0db061fe2313 | refs/heads/master | 2020-05-31T17:25:30.299270 | 2019-06-04T16:26:08 | 2019-06-04T16:26:08 | 190,408,040 | 0 | 0 | null | 2019-06-05T14:15:14 | 2019-06-05T14:15:14 | null | UTF-8 | Python | false | false | 728 | py | from flaskr import db
class Entry(db.Model):
    """SQLAlchemy model storing one questionnaire submission per row.

    Each row holds free-text answers to ten questions (q1..q10) in the
    'entries' table.
    """
    __tablename__ = 'entries'
    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Free-text answers to questions 1 through 10; all nullable.
    q1 = db.Column(db.Text)
    q2 = db.Column(db.Text)
    q3 = db.Column(db.Text)
    q4 = db.Column(db.Text)
    q5 = db.Column(db.Text)
    q6 = db.Column(db.Text)
    q7 = db.Column(db.Text)
    q8 = db.Column(db.Text)
    q9 = db.Column(db.Text)
    q10 = db.Column(db.Text)
    def __repr__(self):
        # Debug representation listing the id and all ten answers.
        return '<Entry id={id} q1={q1} q2={q2} q3={q3} q4={q4} q5={q5} q6={q6} q7={q7} q8={q8} q9={q9} q10={q10}>'.format(
            id=self.id,q1=self.q1, q2=self.q2,q3=self.q3,q4=self.q4,q5=self.q5,q6=self.q6,q7=self.q7,q8=self.q8,q9=self.q9,q10=self.q10)
def init():
    """Create all tables registered on the shared SQLAlchemy metadata."""
    db.create_all()
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.