blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90f47ef7ae4c651ca9ff4a3a3a44c38f3c6ef035 | 847c49e79aa0f967d2ee41e759ab2a4c0cca0f53 | /src/TriProjectile.py | 57a471748bf85b9fbbb5a5178742df4d1ee6bac5 | [] | no_license | lukeg101/G51FSE-Pygame-Project | f77a9f9e0169167e306f360879c96eb286770be0 | 15f6ccd7c609ab8fbf4371ec87407e1f802f7849 | refs/heads/master | 2020-04-19T19:21:58.751937 | 2015-01-05T18:41:12 | 2015-01-05T18:41:12 | 17,424,074 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | #import game modules
import sys, pygame
from pygame.sprite import Sprite
from pygame.locals import *
"""class that defines and characterises projectile objects within the game"""
class TriProjectile(Sprite):
    """A small plasma projectile sprite that drifts sideways while rising."""

    def __init__(self, spawnCoords, direction):
        """Create a projectile centered on spawnCoords.

        direction: 1 = drift right, 0 = drift left (any other value = no drift).
        """
        pygame.sprite.Sprite.__init__(self)
        self.fireDirection = direction
        # Load the artwork and shrink it to a 15x15 sprite.
        loaded = pygame.image.load("plasmaProjectile1.png")
        self.curImage = pygame.transform.scale(loaded, (15, 15))
        self.image = self.curImage
        # Bounding rect pygame uses for positioning and collisions.
        self.rect = self.image.get_rect()
        self.rect.center = spawnCoords

    def update(self):
        """Advance one step along the projectile's linear path."""
        heading = self.fireDirection
        if heading == 0:
            self.rect.x -= 1.1
        elif heading == 1:
            self.rect.x += 1.1
        # NOTE(review): pygame Rect coordinates are integers, so the 1.1
        # horizontal step is presumably truncated to 1 — confirm intended speed.
        self.rect.y -= 2
| [
"lukegeeson@googlemail.com"
] | lukegeeson@googlemail.com |
3d0676ba2f791a63793876a7f9cac6d72829b2f7 | f845225329fa9750c838bf511fed3beb48cc86af | /accounts/migrations/0024_auto_20190104_1910.py | 548b1c2d5d7579453106f5fb740c98f2b35c2c4a | [] | no_license | Fabricourt/btre_project- | ac8c2b84cc8b7f4f5368a204dc23b378d488b356 | 13defd495ba309ac31550d22ad7d6306638f91eb | refs/heads/master | 2020-04-15T11:03:05.980170 | 2019-01-08T11:16:56 | 2019-01-08T11:16:56 | 164,611,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 2.1.4 on 2019-01-04 16:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-01-04 16:10).

    Relaxes accounts.Dashboard.title to allow blank values with a
    max length of 200 characters.
    """

    dependencies = [
        ('accounts', '0023_dashboard_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dashboard',
            name='title',
            # NOTE(review): default=None on a CharField without null=True will
            # fail at save time if the default is ever used — confirm intent.
            field=models.CharField(blank=True, default=None, max_length=200),
        ),
    ]
| [
"mfalme2030@gmail.com"
] | mfalme2030@gmail.com |
07626bcccc8172b3f4f025086395a71babd6ebd0 | 799691e93da6cc46c43a588ea7861b250bffb1ff | /learning/base_2/hui_diao_def.py | 205c2f558dd32037706dd1373268974ef8c36234 | [] | no_license | fayzoro/some_spiders_to_test | 73b442e92a19d78b90cbaf1f33eda7cdefb02212 | 2630c90160c52e43894e763000bd40cea21e3009 | refs/heads/master | 2020-07-29T17:49:37.414094 | 2020-02-22T08:38:16 | 2020-02-22T08:38:16 | 209,908,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : hui_diao_def.py
@Contact : 625711951@qq.com
@License : (C)Copyright 2019-2020, Zyf-FT
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2019/10/17 20:59 zyfei 1.0 None
'''
import re
import lxml
import urllib.parse
import urllib.request
import random
import re
def download(url, user_agent='wswp', num_retries=2):
    '''
    Download a page, setting the User-agent header and retrying on
    server-side (5xx) errors.
    :param url: address to download
    :param user_agent: User-agent header value to send
    :param num_retries: how many times to retry on a 5xx error
    :return: decoded page content, or None if the download failed
    '''
    headers = {'User-agent': user_agent}
    print('downloading...', url)
    request = urllib.request.Request(url, headers=headers)
    try:
        response = urllib.request.urlopen(request).read()
        html = response.decode('utf-8')
    except urllib.request.URLError as e:
        print('download error:', e.reason)
        html = None
        # BUG FIX: the original discarded the recursive retry's return value
        # (and hard-coded user_agent='wswp'), so a successful retry still
        # returned None. Capture the result and pass the caller's agent.
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            html = download(url, user_agent=user_agent,
                            num_retries=num_retries - 1)
    return html
def link_crawler(seed_url, link_regex):
    '''
    Depth-first crawl starting from seed_url, following only the links
    whose absolute URL matches link_regex. Each URL is visited once.
    :param seed_url: starting page; also the base for relative links
    :param link_regex: pattern an absolute URL must match to be queued
    '''
    pending = [seed_url]
    seen = set(pending)
    while pending:
        url = pending.pop()
        print('downloading...', url)
        page = download(url)
        found = get_links(page)
        print('link_list =', found)
        for raw_link in found:
            absolute = urllib.parse.urljoin(seed_url, raw_link)
            # Skip anything already queued/visited or not matching the filter.
            if absolute in seen:
                continue
            if not re.match(link_regex, absolute):
                continue
            seen.add(absolute)
            pending.append(absolute)
        print('crawl_queue =', pending)
def get_links(html):
    '''
    Extract the href target of every anchor tag in html.
    :param html: page source to scan
    :return: list of href values (may be relative URLs)
    '''
    # BUG FIX: the original pattern '<a[^> + href=["\'](.*?)["\']' contained a
    # malformed character class ([^> + href=["\']) that closed at the first ']'
    # and therefore never matched real anchor tags. The intended pattern
    # requires one-or-more non-'>' characters between '<a' and 'href='.
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    return webpage_regex.findall(html)
# def scrape_callback(url, htmls):
# '''
#
# :param url:
# :param htmls:
# :return:
# '''
# if re.search('/view/', url):
# tree = lxml.htmls.fromstring(htmls)
# row = [tree.cssselect('table > tr#places_%s_row > td.w2p_fw' % field)[0].text_content() for field in FIELDS]
# print(url, row)
# pass
if __name__ == '__main__':
pass
link_crawler('http://example.webscraping.com', '/(index|view)') | [
"zhangyafei.912@163.com"
] | zhangyafei.912@163.com |
343338241b0c67481daef30911f6bfbb1e96bed9 | 120120e2f7ed014dac87cceb55a49a11344aea0c | /roles/custom_module/module_utils/ibm_ss_filesystem_utils.py | b6f6ca50cc4dbd75feebcf6298c7adeeeedc40a2 | [
"Apache-2.0"
] | permissive | Perf-Org-5KRepos/ibm-spectrum-scale-install-infra | 4d9ba3e04b6bae1681de0b8062b872e092513c58 | 6c070b41985b5fe0549ed88813a29a9d96df8480 | refs/heads/master | 2022-11-09T22:08:27.028901 | 2020-06-26T03:04:09 | 2020-06-26T03:04:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,703 | py | #!/usr/bin/python
#
# Copyright 2020 IBM Corporation
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from ibm_ss_utils import runCmd, parse_simple_cmd_output, GPFS_CMD_PATH, \
RC_SUCCESS, SpectrumScaleException
class SpectrumScaleFS:
    """A single Spectrum Scale (GPFS) filesystem.

    Wraps the property list parsed from "mmlsfs -Y" output and exposes
    typed accessors for each field, plus static helpers that shell out
    to the GPFS administration commands (mmlsfs, mmumount, mmcrfs).
    """

    def __init__(self, device_name, filesystem_properties):
        # filesystem_properties is a list of dicts, each carrying
        # "fieldName", "data" and (for some fields) "remarks" keys as
        # parsed from the colon-delimited "mmlsfs -Y" output.
        self.device_name = device_name
        self.properties_list = filesystem_properties

    # --- generic typed accessors (last matching field wins) -------------

    def __get_property_as_str(self, prop_name):
        """Return prop_name's value as a string ("" when absent)."""
        str_prop_value = ""
        for fs_property in self.properties_list:
            if prop_name in fs_property["fieldName"]:
                str_prop_value = fs_property["data"]
        return str_prop_value

    def __get_property_as_int(self, prop_name):
        """Return prop_name's value as an int (0 when absent)."""
        int_prop_value = 0
        for fs_property in self.properties_list:
            if prop_name in fs_property["fieldName"]:
                int_prop_value = int(fs_property["data"])
        return int_prop_value

    def __get_property_as_bool(self, prop_name):
        """Return True only when prop_name's value contains "Yes"/"yes"."""
        bool_prop_value = False
        for fs_property in self.properties_list:
            if prop_name in fs_property["fieldName"]:
                if ("Yes" in fs_property["data"] or
                    "yes" in fs_property["data"]):
                    bool_prop_value = True
        return bool_prop_value

    # --- per-field accessors --------------------------------------------

    def get_device_name(self):
        return self.device_name

    def get_syspool_min_fragment_size(self):
        # minFragmentSize is reported once per pool; the "remarks" column
        # distinguishes the system pool from the other pools.
        syspool_min_fragment_size = 0
        for fs_property in self.properties_list:
            if ("minFragmentSize" in fs_property["fieldName"] and
                "system pool" in fs_property["remarks"]):
                syspool_min_fragment_size = int(fs_property["data"])
        return syspool_min_fragment_size

    def get_other_pool_min_fragment_size(self):
        other_pool_min_fragment_size = 0
        for fs_property in self.properties_list:
            if ("minFragmentSize" in fs_property["fieldName"] and
                "other pools" in fs_property["remarks"]):
                other_pool_min_fragment_size = int(fs_property["data"])
        return other_pool_min_fragment_size

    def get_inode_size(self):
        return self.__get_property_as_int("inodeSize")

    def get_indirect_block_size(self):
        return self.__get_property_as_int("indirectBlockSize")

    def get_default_metadata_replicas(self):
        return self.__get_property_as_int("defaultMetadataReplicas")

    def get_max_metadata_replicas(self):
        return self.__get_property_as_int("maxMetadataReplicas")

    def get_default_data_replicas(self):
        return self.__get_property_as_int("defaultDataReplicas")

    def get_max_data_replicas(self):
        return self.__get_property_as_int("maxDataReplicas")

    def get_block_allocation_type(self):
        return self.__get_property_as_str("blockAllocationType")

    def get_file_locking_semantics(self):
        return self.__get_property_as_str("fileLockingSemantics")

    def get_acl_semantics(self):
        return self.__get_property_as_str("ACLSemantics")

    def get_num_nodes(self):
        return self.__get_property_as_int("numNodes")

    def get_syspool_block_size(self):
        # blockSize is also reported per pool; see remarks handling above.
        syspool_block_size = 0
        for fs_property in self.properties_list:
            if ("blockSize" in fs_property["fieldName"] and
                "system pool" in fs_property["remarks"]):
                syspool_block_size = int(fs_property["data"])
        return syspool_block_size

    def get_other_pool_block_size(self):
        other_pool_block_size = 0
        for fs_property in self.properties_list:
            if ("blockSize" in fs_property["fieldName"] and
                "other pools" in fs_property["remarks"]):
                other_pool_block_size = int(fs_property["data"])
        return other_pool_block_size

    def get_quotas_accounting_enabled(self):
        return self.__get_property_as_str("quotasAccountingEnabled")

    def get_quotas_enforced(self):
        return self.__get_property_as_str("quotasEnforced")

    def get_default_quotas_enabled(self):
        return self.__get_property_as_str("defaultQuotasEnabled")

    def get_per_fileset_quotas(self):
        return self.__get_property_as_bool("perfilesetQuotas")

    def is_fileset_df_enabled(self):
        return self.__get_property_as_bool("filesetdfEnabled")

    def get_filesystem_version(self):
        return self.__get_property_as_str("filesystemVersion")

    def get_filesystem_version_local(self):
        return self.__get_property_as_str("filesystemVersionLocal")

    def get_filesystem_version_manager(self):
        return self.__get_property_as_str("filesystemVersionManager")

    def get_filesystem_version_original(self):
        return self.__get_property_as_str("filesystemVersionOriginal")

    def get_filesystem_highest_supported(self):
        return self.__get_property_as_str("filesystemHighestSupported")

    def get_create_time(self):
        return self.__get_property_as_str("create-time")

    def is_dmapi_enabled(self):
        return self.__get_property_as_bool("DMAPIEnabled")

    def get_logfile_size(self):
        return self.__get_property_as_int("logfileSize")

    def is_exact_m_time(self):
        return self.__get_property_as_bool("exactMtime")

    def get_suppress_atime(self):
        return self.__get_property_as_str("suppressAtime")

    def get_strict_replication(self):
        return self.__get_property_as_str("strictReplication")

    def is_fast_ea_enabled(self):
        return self.__get_property_as_bool("fastEAenabled")

    def is_encrypted(self):
        return self.__get_property_as_bool("encryption")

    def get_max_number_of_inodes(self):
        return self.__get_property_as_int("maxNumberOfInodes")

    def get_max_snapshot_id(self):
        return self.__get_property_as_int("maxSnapshotId")

    def get_uid(self):
        return self.__get_property_as_str("UID")

    def get_log_replicas(self):
        return self.__get_property_as_int("logReplicas")

    def is_4k_aligned(self):
        return self.__get_property_as_bool("is4KAligned")

    def is_rapid_repair_enabled(self):
        return self.__get_property_as_bool("rapidRepairEnabled")

    def get_write_cache_threshold(self):
        return self.__get_property_as_int("write-cache-threshold")

    def get_subblocks_per_full_block(self):
        return self.__get_property_as_int("subblocksPerFullBlock")

    def get_storage_pools(self):
        # Pools are reported as a single semicolon-separated string.
        storage_pool_list = []
        storage_pool_str = self.__get_property_as_str("storagePools")
        if storage_pool_str:
            storage_pool_list = storage_pool_str.split(";")
        return storage_pool_list

    def is_file_audit_log_enabled(self):
        return self.__get_property_as_bool("file-audit-log")

    def is_maintenance_mode(self):
        return self.__get_property_as_bool("maintenance-mode")

    def get_disks(self):
        # Disks are reported as a single semicolon-separated string.
        disk_list = []
        disk_str = self.__get_property_as_str("disks")
        if disk_str:
            disk_list = disk_str.split(";")
        return disk_list

    def is_automatic_mount_option_enabled(self):
        return self.__get_property_as_bool("automaticMountOption")

    def get_additional_mount_options(self):
        return self.__get_property_as_str("additionalMountOptions")

    def get_default_mount_point(self):
        return self.__get_property_as_str("defaultMountPoint")

    def get_mount_priority(self):
        return self.__get_property_as_int("mountPriority")

    def get_properties_list(self):
        return self.properties_list

    def to_json(self):
        # TODO: Include Filesystem Device Name
        return json.dumps(self.properties_list)

    def print_filesystem(self):
        """Dump every known property of this filesystem to stdout."""
        print("Device Name           : {0}".format(self.get_device_name()))
        print("Syspool Min Fragment Size : {0}".format(self.get_syspool_min_fragment_size()))
        print("Other Pool Min Fragment Size : {0}".format(self.get_other_pool_min_fragment_size()))
        print("Inode Size            : {0}".format(self.get_inode_size()))
        print("Indirect Block Size   : {0}".format(self.get_indirect_block_size()))
        print("Default Metadata Replicas : {0}".format(self.get_default_metadata_replicas()))
        print("Max Metadata Replicas : {0}".format(self.get_max_metadata_replicas()))
        print("Default Data Replicas : {0}".format(self.get_default_data_replicas()))
        print("Max Data Replicas     : {0}".format(self.get_max_data_replicas()))
        print("Block Allocation Type : {0}".format(self.get_block_allocation_type()))
        print("File Locking Semantics : {0}".format(self.get_file_locking_semantics()))
        print("ACL Semantics         : {0}".format(self.get_acl_semantics()))
        print("Num Nodes             : {0}".format(self.get_num_nodes()))
        print("Syspool Block Size    : {0}".format(self.get_syspool_block_size()))
        print("Other Pool Block Size : {0}".format(self.get_other_pool_block_size()))
        print("Quotas Accounting Enabled : {0}".format(self.get_quotas_accounting_enabled()))
        print("Quotas Enforced       : {0}".format(self.get_quotas_enforced()))
        print("Default Quotas Enabled : {0}".format(self.get_default_quotas_enabled()))
        print("Per Fileset Quotas    : {0}".format(self.get_per_fileset_quotas()))
        print("Fileset df Enabled    : {0}".format(self.is_fileset_df_enabled()))
        print("Filesystem Version    : {0}".format(self.get_filesystem_version()))
        print("Filesystem Version Local : {0}".format(self.get_filesystem_version_local()))
        print("Filesystem Version Manager : {0}".format(self.get_filesystem_version_manager()))
        print("Filesystem Version Original : {0}".format(self.get_filesystem_version_original()))
        print("Filesystem Highest Supported : {0}".format(self.get_filesystem_highest_supported()))
        print("Create Time           : {0}".format(self.get_create_time()))
        print("DMAPI Enabled         : {0}".format(self.is_dmapi_enabled()))
        print("Logfile Size          : {0}".format(self.get_logfile_size()))
        print("Is Exact m Time       : {0}".format(self.is_exact_m_time()))
        print("Suppress atime        : {0}".format(self.get_suppress_atime()))
        print("Strict Replication    : {0}".format(self.get_strict_replication()))
        print("Is Fast EA Enabled    : {0}".format(self.is_fast_ea_enabled()))
        print("Is Encrypted          : {0}".format(self.is_encrypted()))
        print("Max Number Of Inodes  : {0}".format(self.get_max_number_of_inodes()))
        print("Max Snapshot Id       : {0}".format(self.get_max_snapshot_id()))
        print("UID                   : {0}".format(self.get_uid()))
        print("Log Replicas          : {0}".format(self.get_log_replicas()))
        print("Is 4K Aligned         : {0}".format(self.is_4k_aligned()))
        print("Is Rapid Repair Enabled : {0}".format(self.is_rapid_repair_enabled()))
        print("Write Cache Threshold : {0}".format(self.get_write_cache_threshold()))
        print("Subblocks Per Full Block : {0}".format(self.get_subblocks_per_full_block()))
        print("Storage Pools         : {0}".format(self.get_storage_pools()))
        print("Is File Audit Log Enabled : {0}".format(self.is_file_audit_log_enabled()))
        print("Is Maintenance Mode   : {0}".format(self.is_maintenance_mode()))
        print("Disks                 : {0}".format(self.get_disks()))
        print("Is Automatic Mount Option Enabled : {0}".format(self.is_automatic_mount_option_enabled()))
        print("Additional Mount Options : {0}".format(self.get_additional_mount_options()))
        print("Default Mount Point   : {0}".format(self.get_default_mount_point()))
        print("Mount Priority        : {0}".format(self.get_mount_priority()))

    # --- command wrappers -------------------------------------------------

    @staticmethod
    def get_filesystems():
        """Return a SpectrumScaleFS for every filesystem ("mmlsfs all -Y").

        Returns [] when the cluster has no filesystems; raises
        SpectrumScaleException on any other command failure.
        """
        filesystem_info_list = []
        stdout, stderr, rc = runCmd([os.path.join(GPFS_CMD_PATH, "mmlsfs"),
                                     "all", "-Y"],
                                    sh=False)

        if rc != RC_SUCCESS:
            # "No file systems" is an expected outcome, not an error.
            if 'mmlsfs: No file systems were found.' in stdout or \
               'mmlsfs: No file systems were found.' in stderr:
                return filesystem_info_list
            raise SpectrumScaleException("Retrieving filesystem information failed",
                                         "mmlsfs",
                                         ["all", "-Y"],
                                         rc, stdout, stderr)

        filesystem_dict = parse_simple_cmd_output(stdout, "deviceName",
                                                  "properties", "filesystems")
        filesystem_list = filesystem_dict["filesystems"]

        for filesystem in filesystem_list:
            device_name = filesystem["deviceName"]
            fs_properties = filesystem["properties"]
            filesystem_instance = SpectrumScaleFS(device_name,
                                                  fs_properties)
            filesystem_info_list.append(filesystem_instance)

        return filesystem_info_list

    @staticmethod
    def unmount_filesystems(node_name, wait=True):
        """Unmount all GPFS filesystems on node_name ("mmumount all -N").

        Returns (rc, stdout); raises SpectrumScaleException on failure.
        NOTE(review): the "wait" parameter is currently unused — confirm
        whether callers expect blocking behavior.
        """
        cmd = [os.path.join(GPFS_CMD_PATH, "mmumount"), "all", "-N", node_name]
        # BUG FIX: stdout/stderr must be bound before runCmd can raise,
        # otherwise the except clause below fails with NameError and
        # masks the real error.
        stdout, stderr = "", ""
        try:
            stdout, stderr, rc = runCmd(cmd, sh=False)
        except Exception as e:
            raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
                                         -1, stdout, stderr)

        if rc != RC_SUCCESS:
            if 'mmumount: No file systems were found' in stdout or \
               'mmumount: No file systems were found' in stderr:
                # We can claim success on umount if there are no filesystems
                return RC_SUCCESS
            raise SpectrumScaleException("Unmounting filesystems on node failed",
                                         cmd[0], cmd[1:], rc, stdout, stderr)

        return rc, stdout

    @staticmethod
    def create_filesystem(name, stanza_path, block_size,
                          default_metadata_replicas,
                          default_data_replicas, num_nodes,
                          automatic_mount_option,
                          default_mount_point):
        """Create a filesystem via "mmcrfs"; returns (rc, stdout)."""
        cmd = [os.path.join(GPFS_CMD_PATH, "mmcrfs"), name,
               "-F", stanza_path,
               "-B", block_size,
               "-m", default_metadata_replicas,
               "-r", default_data_replicas,
               "-n", num_nodes,
               "-A", automatic_mount_option,
               "-T", default_mount_point]

        # TODO: Make this idempotent
        # BUG FIX: bind stdout/stderr before the try block (see
        # unmount_filesystems) so the except clause cannot NameError.
        stdout, stderr = "", ""
        try:
            stdout, stderr, rc = runCmd(cmd, sh=False)
        except Exception as e:
            raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
                                         -1, stdout, stderr)

        if rc != RC_SUCCESS:
            raise SpectrumScaleException("Create filesystems on node failed",
                                         cmd[0], cmd[1:], rc, stdout, stderr)

        return rc, stdout

    @staticmethod
    def delete_filesystem(name):
        # TODO: Implement
        rc = RC_SUCCESS
        msg = ""
        return rc, msg
def main():
    """Print every filesystem known to the local cluster."""
    # BUG FIX: get_filesystems is a static method of SpectrumScaleFS; the
    # original called a bare get_filesystems(), which raises NameError.
    filesystem_list = SpectrumScaleFS.get_filesystems()
    for filesystem in filesystem_list:
        filesystem.print_filesystem()
        print("\n")


if __name__ == "__main__":
    main()
| [
"mutmuthi@in.ibm.com"
] | mutmuthi@in.ibm.com |
1fbd2d7dfc262d86cd8d617fb1e95b5ed1256ea5 | c0ec756aa1b9fc3e4a22fef689fa218c2ce05919 | /utils/tools.py | 1e3d4127852ddca3f234ebd080bfddc65027b857 | [] | no_license | daniel-pimenta-cc/GetComicsDownloader | b813b05f96d3af50a8f3a4d916f72fa1391c009a | 36735a1c3ebc4f7c48b4bb012a43e1c6e1359289 | refs/heads/master | 2020-05-17T10:13:03.089358 | 2019-04-26T15:23:17 | 2019-04-26T15:23:17 | 183,651,840 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | #!/usr/bin/python3
# -*-coding:utf-8 -*-
defs = {'KB': 1024, 'MB': 1024**2, 'GB': 1024**3, 'TB': 1024**4}
# Convert to bytes
def convert2bytes(size):
    """Convert a human-readable size string such as '5 MB' to bytes.

    Generalized to accept fractional values ('1.5 GB'); integer inputs
    produce the same results as before. Raises KeyError for an unknown
    unit and ValueError for a non-numeric value.
    """
    units = {'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
    value, unit = size.split()
    # float() accepts both '5' and '1.5'; the original int(size) rejected
    # fractional sizes. The final int() keeps the historical return type.
    return int(float(value) * units[unit])
# Convert with corret unit
def bytes_2_human_readable(number_of_bytes):
    """Format a byte count as a string with the largest sensible unit.

    Divides by 1024 until the value drops below one step (capping at TB),
    rounds to one decimal place, and returns e.g. '1.5 KB'.
    Raises ValueError for negative input.
    """
    if number_of_bytes < 0:
        raise ValueError("!!! number_of_bytes can't be smaller than 0 !!!")

    step_to_greater_unit = 1024.
    units = ['bytes', 'KB', 'MB', 'GB', 'TB']
    value = float(number_of_bytes)
    index = 0
    # Loop replaces the original's four copy-pasted if-blocks; behavior
    # (including the TB cap, so very large values can exceed 1024.0 TB)
    # is unchanged.
    while index < len(units) - 1 and (value / step_to_greater_unit) >= 1:
        value /= step_to_greater_unit
        index += 1

    precision = 1
    value = round(value, precision)
    return str(value) + ' ' + units[index]
| [
"42495364+daniel-pimenta-cc@users.noreply.github.com"
] | 42495364+daniel-pimenta-cc@users.noreply.github.com |
8fed4efecd80e64f21bbf26c0c885974ca96acdf | 65a6fde208f3a00a4d26158a4e8cfdd53e21f8c7 | /Standard feature descriptors in pyhon/humoment.py | e949a2fea3c78e18d1f3fba8a8bb5f071ebf7379 | [] | no_license | rishabh26malik/Matlab-and-image-processing | ef9d66784e8bf89b6df163847cf83e3c9dabefba | 954f18eeeb5988d64fe708ab2d631be5296c9f85 | refs/heads/master | 2021-07-10T08:12:59.061454 | 2020-08-24T10:10:53 | 2020-08-24T10:10:53 | 191,395,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import cv2
import os, os.path
import csv
'''
image = cv2.imread("img.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
x=cv2.HuMoments(cv2.moments(image)).flatten()
print(x)
'''
# Batch-compute Hu moment feature vectors (7 values per image) for every
# image in imageDir and write them, one row per image, to check_file.csv.
imageDir = "/home/rishabh/PREVIOUS HOME/CODES_descriptors/brain_tumour/qwerty" #specify your path here
image_path_list = []
valid_image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"] #specify your vald extensions here
valid_image_extensions = [item.lower() for item in valid_image_extensions]
# Collect every file in imageDir whose extension is in the allowed list.
for file in os.listdir(imageDir):
    extension = os.path.splitext(file)[1]
    if extension.lower() not in valid_image_extensions:
        continue
    image_path_list.append(os.path.join(imageDir, file))
arr=[]
for imagePath in image_path_list:
    a=[]
    image = cv2.imread(imagePath)
    if image is not None:
        # Hu moments are computed on the grayscale version of the image;
        # flatten() turns the 7x1 result into a flat 7-element vector.
        imgGrey=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #hist = hog.compute(imgGrey,winStride,padding,locations)
        x=cv2.HuMoments(cv2.moments(imgGrey)).flatten()
    elif image is None:
        # Unreadable image: report and skip (the continue also prevents
        # reuse of a stale x from a previous iteration).
        print ("Error loading: " + imagePath)
        continue
    for i in x:
        a.append(i)
    arr.append(a)
    #print(type(x))
print((arr))
#for i in arr:
#    print(len(i))
# Persist one CSV row of 7 Hu-moment values per successfully read image.
with open('check_file.csv', 'w',newline='') as fp:
    a1=csv.writer(fp)
    #for row in arr:
    a1.writerows(arr)
| [
"noreply@github.com"
] | noreply@github.com |
1960f27aab4a6e04b44d42cae1957586f552c1e4 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/battle_control/requests/__init__.py | cb82e19a0f4681639e117250b900737bead761e8 | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 491 | py | # 2015.11.18 11:52:01 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/battle_control/requests/__init__.py
from gui.battle_control.requests.AvatarRequestsController import AvatarRequestsController
__all__ = ['AvatarRequestsController']
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\battle_control\requests\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:52:01 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
f5b5956712c08622e982068a84f0eeadb005ccbd | 4a36526cdf4c8ec199fc597055a396d42dd364c2 | /node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi | bac4dedf3d7a0f7732bfeccc38c0ed2190dba4bf | [
"MIT"
] | permissive | alanayoub/watchr | 0a216a297cab777b421d4d1a5e4120307e364490 | c44dc75596a4ddbce8f1c814f6aeeddc713d20c9 | refs/heads/master | 2021-01-18T00:29:04.066117 | 2015-09-30T19:12:37 | 2015-09-30T19:12:37 | 15,404,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,004 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 47,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"nodedir": "/home/alan/.node-gyp/0.10.25",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"always_auth": "",
"user_agent": "node/v0.10.25 linux x64",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "null",
"userconfig": "/home/alan/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/home/alan/tmp",
"depth": "null",
"save_dev": "",
"usage": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/usr/bin/zsh",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/alan/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "18",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "v0.10.25",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/alan/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": ""
}
}
| [
"alanayoub@gmail.com"
] | alanayoub@gmail.com |
dcfbcd9b0e036f17bbc310e385f9a7d9f2753846 | 251f05c594b2a1c3b34a5aeebf1ca6212f8d7ffb | /python/customcircle.py | 2a31d4a155ba875de675e2fd764142dbd671e3fc | [] | no_license | Zahidsqldba07/code_repo-ML | c86ff3c54c38b1c7667f1d4957375f6c2f4dbea8 | 72e7931a7239e4ea372a35330c3f0763e0ba1c43 | refs/heads/master | 2023-03-16T03:14:30.278736 | 2019-11-08T20:11:54 | 2019-11-08T20:11:54 | 513,224,923 | 1 | 0 | null | 2022-07-12T16:59:33 | 2022-07-12T16:59:32 | null | UTF-8 | Python | false | false | 1,182 | py | class circle:
def __init__(self, center=(0,0), radius=1):
if radius < 0:
raise ValueError("negative radius")
self.center = center
self.radius = radius
def get_radius(self):
return self.radius
def get_center(self):
return self.center
def get_area(self):
from math import pi
return pi*self.radius*self.radius
def get_circumference(self):
from math import pi
return 2*pi*self.radius
def move(self,pt):
self.center(pt)
def grow(self):
self.radius+=1
def shrink(self):
if self.radius>0:
self.radius -= 1
def main():
    """Exercise the circle class on two sample instances."""
    c1 = circle((2, 4), 5)
    print(c1, dir(c1))
    c2 = circle((0, 0), 1)
    # Report radius, center, area and circumference for each circle.
    for shape in (c1, c2):
        print(shape.get_radius())
        print(shape.get_center())
        print(shape.get_area())
        print(shape.get_circumference())
main()
from custom1 import *
c=circle((0,0),4)
c.get_radius()
| [
"57429294+Gurtejkaur@users.noreply.github.com"
] | 57429294+Gurtejkaur@users.noreply.github.com |
4829979e77f32635a97c9321a5dd8bd5767ac20e | 8ad01ae1a148bdb5c4d597dec5c62053346dc30a | /python/lib/utils/peltierOp.py | 6ec62f82b7022388f70a7e1e7f8c98991ea75c95 | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | NXP/tpmp_ctrl | baac374caf2caca8a7993ba3ae98b5e4aceae527 | a1e9097b931f706ef922dba1497e00ba6fa63bc8 | refs/heads/master | 2023-02-21T01:47:27.807770 | 2020-10-14T13:33:19 | 2020-10-15T09:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | #!/usr/bin/env python
# Copyright 2019 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Author: Jerome NEANNE
# Mail: jerome.neanne@nxp.com
# description: wrapper for Peltier control
import sys
sys.path.insert(0, '../../') #ToDo: cleanup: move or remove this file
import basicSerialAuto
def setPeltierImmediate(termType, objInst):
    """Apply objInst.tempPeltier to the Peltier element right away.

    The "mini" terminal type is not supported yet; any other type is
    forwarded to basicSerialAuto.
    """
    if termType != "mini":
        print ("Set Peltier to: " + objInst.tempPeltier)
        basicSerialAuto.setPeltierImmediate(objInst)
    else:
        print ("To be implemented")
def setPeltier(termType, objInst):
    """Program objInst.tempPeltier as the Peltier setpoint.

    The "mini" terminal type is not supported yet; any other type is
    forwarded to basicSerialAuto.
    """
    if termType != "mini":
        print ("Set Peltier to: " + objInst.tempPeltier)
        basicSerialAuto.setPeltier(objInst)
    else:
        print ("To be implemented")
def readPeltier(termType, objInst):
    """Read back the Peltier state via basicSerialAuto.

    Returns None for the unsupported "mini" terminal type (matching the
    original implicit return), otherwise whatever basicSerialAuto reports.
    """
    if termType == "mini":
        print ("To be implemented")
        return None
    print ("Read Peltier")
    return basicSerialAuto.readPeltier(objInst)
| [
"philippe.mazet@nxp.com"
] | philippe.mazet@nxp.com |
936982e0bea573b7f271241524b520589ff59504 | 401c5d1a67005388e9f4e55eff4110b9be2027c2 | /Login.py | e12aa13ee42cecc1b9ce7c00f8880594564e62f5 | [] | no_license | lucasarieiv/Login_Cadastro | c98bb07c04d5bd2b5c67b9f53d11fea37eaafcfa | 3c6f4bb3cf681fd77b77f829faa9dd91acec5cfe | refs/heads/master | 2021-04-25T16:26:36.364474 | 2018-03-03T05:29:12 | 2018-03-03T05:29:12 | 121,457,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | from Cadastro import *
import os
def resPass():
    """Show the main menu and return the user's raw choice string."""
    menu = (
        'Digite 1- Para Fazer Login',
        'Digite 2- Para Cadastrar',
        'Digite 3- Para Sair',
    )
    for line in menu:
        print(line)
    return input()
# Interactive main loop: 1 = login, 2 = register (Cadastro module),
# 3 = quit. Each registered user is stored as Cadastros\<NAME>.txt.
while True:
    resp = resPass()
    if resp == '1':
        print('Bem Vindo Ao Nosso Programa')
        login = input('Digite Seu Login: ')
        # Look for a file named after the (upper-cased) login.
        for u in os.listdir('Cadastros\\'):
            if (login.upper() + '.txt') == u:
                senha = input('Digite Sua Senha: ')
                uLog = open('Cadastros\\' + u, 'r')
                cLog = uLog.read()
                cont = 0
                # The stored password is assumed to be the 3rd line of
                # the user file — TODO confirm against Cadastro's format.
                for ul in cLog.split('\n'):
                    cont += 1
                    if cont == 3:
                        if ul == senha.upper():
                            print('Bem Vindo')
                        elif ul != senha.upper():
                            # Re-prompt until the typed password matches.
                            while ul != senha.upper():
                                print('Senha Incorreta!')
                                senha = input('Digite Sua Senha Novamente: ')
                            print('Bem Vindo!')
                uLog.close()
    elif resp == '2':
        cadastroPessoas()
    elif resp == '3':
        break
| [
"tiomedianope@gmail.com"
] | tiomedianope@gmail.com |
b7d28915d0260bdbaa5ecdcd90430b17255444b9 | 9f8aad9802e385dfb8d176f4f954c92d88a78f66 | /PythonPandas/panda_operations.py | b5dac97325b369c39e370c8554b4b4b6de808642 | [] | no_license | Khokavim/Python-Advanced | 8b33def089cb18c6b381cc91afed533dc8dc8ad9 | 977ca26ed410969098a1e034750276d99f628ceb | refs/heads/master | 2020-04-08T16:57:18.203355 | 2018-12-04T22:37:45 | 2018-12-04T22:37:45 | 159,543,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,458 | py | #Pandas library helps with importing datasets , working with series, dataframes
import pandas as pd
import numpy_operations as np
from pandas import Series, DataFrame
# Series and Data Frame are two data structures available in python
# Demo: a Series pairs an auto-generated integer index with the given values.
series_a = Series([5,4,3,2,1])# a simple series
print(series_a) # A series is represented by index on the left and values on the right
print(series_a.values) # similar to dictionary. ".values" command returns values in a series
print(series_a.index) # returns the index values of the series
# series_b = Series([5,4,3,2,1,-7,-29], index =['a','b','c','d','e','f','h']) # The index is specified
# print (series_b) # try series_b.index and series_b.values
# print (series_b['a']) # selecting a particular value from a Series, by using index
#
# series_b['d'] = 9 # change the value of a particular element in series
# print (series_b)
# series_b ([['a','b','c']]) # select a group of values
#
# print (series_b[series_b>0]) # returns only the positive values
# print (series_b *2) # multiplies 2 to each element of a series
#
# np.mean(series_b) # you can apply numpy functions to a Series
#
# print ('b' in series_b) # checks whether the index is present in Series or not
# print ('z' in series_b)
#
# player_salary ={'Rooney': 50000, 'Messi': 75000, 'Ronaldo': 85000, 'Fabregas':40000, 'Van persie': 67000}
# new_player = Series(player_salary)# converting a dictionary to a series
# print (new_player) # the series has keys of a dictionary
#
# players =['Klose', 'Messi', 'Ronaldo', 'Van persie', 'Ballack']
# player_1 =Series(player_salary, index= players)
# print (player_1) # Changed the index of the Series. Since, no value was not found for Klose and Ballack, it appears as NAN
#
# pd.isnull(player_1)#checks for Null values in player_1, pd denotes a pandas dataframe
#
# pd.notnull(player_1)# Checks for null values that are not Null
#
# player_1.name ='Bundesliga players' # name for the Series
# player_1.index.name='Player names' #name of the index
# print(player_1)
#
# player_1.index =['Neymar', 'Hulk', 'Pirlo', 'Buffon', 'Anderson'] # is used to alter the index of Series
# print(player_1)
#
#
# #Data_frames
# states ={'State' :['Kaduna', ' Lagos', 'Kano', 'Plateau', 'Zamfara'],
# 'Population': [360, 4400, 6798,8987,3400],
# 'Language' :['Hausa', 'Yoruba', 'Berom', 'Kagoro', 'Igbo']}
# nigeria = DataFrame(states) # creating a data frame
# print(nigeria)
#
# DataFrame(states, columns=['State', 'Language', 'Population']) # change the sequence of column index
#
# new_frame = DataFrame(states, columns=['State', 'Language', 'Population', 'Per Capita Income'], index =['a','b','c','d','e'])
# #if you pass a column that isnt in states, it will appear with Na values
#
# print (new_frame.columns)
# print (new_frame['State'] )# retrieveing data like dictionary
#
# new_frame.Population # like Series
#
# new_frame.ix[3] # rows can be retrieved using .ic function
# # here I have retrieved 3rd row
#
# print(new_frame)
#
# new_frame['Per Capita Income'] = 99 # the empty per capita income column can be assigned a value
# print(new_frame)
#
# new_frame['Per Capita Income'] = np.arange(5) # assigning a value to the last column
# print(new_frame)
#
# series = Series([44,33,22], index =['b','c','d'])
# new_frame['Per Capita Income'] = series
#
# #when assigning list or arrays to a column, the values lenght should match the length of the DataFrame
# print(new_frame)
# # again the missing values are displayed as NAN
#
# new_frame['Development'] = new_frame.State == 'Kaduna'# assigning a new column
# print (new_frame)
# del new_frame['Development'] # will delete the column 'Development'
# print(new_frame)
#
# new_data ={'Enugu': {2010: 72, 2012: 78, 2014 : 98},'Borno': {2010: 55, 2012: 34, 2014: 22}}
# elections = DataFrame(new_data)
# print (elections)# the outer dict keys are columns and inner dict keys are rows
# elections.T # transpose of a data frame
#
# DataFrame(new_data, index =[2012, 2014, 2016]) # you can assign index for the data frame
#
# ex= {'Kaduna':elections['Zamfara'][:-1], 'Nigeria': elections['Borno'][:2]}
# px =DataFrame(ex)
# px
#
# px.index.name = 'year'
# px.columns.name = 'politicians'
# print(px)
#
# print(px.values)
#
# series_c = Series([5,4,3,2,1,-7,-29], index =['a','b','c','d','e','f','h'])
# index = series_c.index
# print (index) #u denotes unicode
# print (index[1:])# returns all the index elements except a.
# index[1] = 'f' # you cannot modify an index element. It will generate an error. In other words, they are immutable
#
# print (px)
# print(2013 in px.index) # checks if 2003 is an index in data frame px
#
# var = Series(['Python', 'Java', 'c', 'c++', 'Php'], index =[5,4,3,2,1])
# print (var)
# var1 = var.reindex([1,2,3,4,5])# reindex creates a new object
# print (var1)
#
# var.reindex([1,2,3,4,5,6,7])# introduces new indexes with values Nan
#
# var.reindex([1,2,3,4,5,6,7], fill_value =1) # you can use fill value to fill the Nan values. Here I have used fill value as 1. You can use any value.
#
# series_d =Series(['Ibadan', 'Nassarawa', 'Borno'], index =[0,2,4])
# print (series_d)
# series_d.reindex(range(6), method ='ffill') #ffill is forward fill. It forward fills the values
#
# series_d.reindex(range(6), method ='bfill')# bfill, backward fills the values
#
# reshape = DataFrame(np.arange(9).reshape((3,3)),index =['a','b','c'], columns =['Ibadan','Nassarawa', 'Borno'])
# print(reshape)
#
# reshape_2 =reshape.reindex(['a', 'b', 'c', 'd'], columns = states) # reindexing columns and indices
# print(reshape)
#
# series_e = Series(np.arange(5), index =['a','b','c','d','e'])
# print (series_e)
# series_e.drop(['a','b']) #drop method will return a new object with values deleted from an axis
#
# states ={'State' :['Kaduna', 'Enugu', 'Nassarawa', 'lagos', 'Borno'],
# 'Population': [306, 944, 9867,899,340],
# 'Language' :['kagoro', 'Igbo', 'Margi', 'Kurama', 'Yoruba']}
# nigeria = DataFrame(states, columns =['State', 'Population', 'Language'])
# print (nigeria)
# nigeria.drop([0,1])# will drop index 0 and 1
#
# nigeria.drop(['State', 'Population'], axis =1 )# the function dropped population and state columns. Apply the same concept with axis =0
#
# series_f = Series(['Python', 'Java', 'c', 'c++', 'Php'], index =[5,4,3,2,1])
# print(series_f)
#
# print(series_f[5])
# print(series_f[2:4])
#
# print(series_f[[3,2,1]])
#
# print(var[var == 'Php'])
#
# states ={'State' :['Kaduna', 'Enugu', 'Nassarawa', 'lagos', 'Borno'],
# 'Population': [306, 944, 9867,899,340],
# 'Language' :['kagoro', 'Igbo', 'Margi', 'Kurama', 'Yoruba']}
# nigeria = DataFrame(states, columns =['State', 'Population', 'Language'])
# print(nigeria)
#
# print(nigeria[['Population', 'Language']]) # retrieve data from data frame
#
# print(nigeria[nigeria['Population'] > 50]) # returns data for population greater than 50
#
# print(nigeria[:3]) # first three rows
#
# # for selecting specific rows and columns, you can use ix function
# states ={'State' :['Kaduna', 'Enugu', 'Nassarawa', 'lagos', 'Borno'],
# 'Population': [306, 944, 9867,899,340],
# 'Language' :['kagoro', 'Igbo', 'Margi', 'Kurama', 'Yoruba']}
# nigeria = DataFrame(states, columns =['State', 'Population', 'Language'])
# print(nigeria)
#
# print(nigeria.ix[['a','b'], ['State','Language']]) # this is how you select subset of rows
| [
"meshachm126@gmail.com"
] | meshachm126@gmail.com |
6fcde301144499017298a0827f87196955b720fb | ead9783ed4ed47d252e756c5b1efe72395d6a194 | /prg1.py | ba5d84501a0be9e29445e8d7eaf324677a8902ae | [] | no_license | gunjanbansal9512/mca2ndSem | 17e7682dcb6fb652794843e2aad8263f1f24a4c7 | a07e49735d61f38b240774f2de2bc79abd29b6f4 | refs/heads/master | 2020-03-17T07:34:00.180887 | 2018-06-10T14:00:39 | 2018-06-10T14:00:39 | 133,403,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # Write a program to sum all the elements from n1 to n2 where n1 and n2 are positive number
def sum_between(n1, n2):
    """Return the sum of every integer from min(n1, n2) to max(n1, n2), inclusive.

    Works regardless of argument order; sum_between(5, 1) == sum_between(1, 5).
    """
    lo, hi = (n1, n2) if n1 < n2 else (n2, n1)
    # range(lo, hi + 1) makes both endpoints inclusive.
    return sum(range(lo, hi + 1))

if __name__ == "__main__":
    n1 = int(input("Enter the vslue of n1"))
    n2 = int(input("Enter the value of n2"))
    # Bug fix: the original accumulated "sum = sum + 1" (a counter, not a sum)
    # when n1 >= n2; both orderings now sum the actual values.
    print("Sum of numbers between n1 and n2 is ", sum_between(n1, n2))
| [
"noreply@github.com"
] | noreply@github.com |
caf97bdb7a408cc4436da1c23a0ce726f007618f | afb1906e4226b6ecdced2dbf47b84d206d9bad34 | /16.guessing_game.py | 079e77563a0f8ed2171d0dca00b98979543fa85d | [] | no_license | raysonkong/python_mike_dane_tutorial | 7554dd0295039125366681fad031f459db7696f2 | 9a9b4ac760ae4a56a90ec6d22a170ace9e246632 | refs/heads/master | 2022-11-07T14:09:26.192882 | 2020-06-23T07:29:05 | 2020-06-23T07:29:05 | 269,577,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # 2:20:15
# Guessing Game
# Guessing game: the player gets up to `guess_limit` attempts at the word.
secret_word = "giraffe"
guess_limit = 3
attempts = 0
guess = ""
out_of_guesses = False

while guess != secret_word and not out_of_guesses:
    if attempts >= guess_limit:
        out_of_guesses = True
    else:
        guess = input("Make a guess: ")
        attempts += 1

# Either the word was matched, or the attempt budget ran out.
print("You Lost!!" if out_of_guesses else "You won!")
print("You Lost!!") | [
"kong.rayson@gmail.com"
] | kong.rayson@gmail.com |
5de412aa9b80c0176b662be5d7ca07b97f2ce4fc | 1c83416f15e38fa67eb5ffc7582a40489e8bb452 | /zmq/zmq_client.py | 92aa1f4d940f12c4463959c85eb6099460644509 | [] | no_license | DisabledStudy/Python | 2f6696ae290efd42cb14a37cdde1187f4f8ca7ed | c02249d84b3c4de7e68274fe36631aa005e0d3fc | refs/heads/master | 2020-04-14T00:27:30.944082 | 2018-12-29T19:19:59 | 2018-12-29T19:20:35 | 163,533,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import zmq
# Simple ZeroMQ REQ client: sends five requests to a local REP server and
# prints each reply. REQ sockets enforce a strict send/recv alternation.
host = '127.0.0.1'
port = 6789
context = zmq.Context()
client = context.socket(zmq.REQ)
client.connect("tcp://%s:%s" % (host, port))
for num in range(1, 6):
    request_str = "message #%s" % num
    request_bytes = request_str.encode('utf-8')
    client.send(request_bytes)
    # Blocks until the server answers this request.
    reply_bytes = client.recv()
    reply_str = reply_bytes.decode('utf-8')
    print("Send %s, received %s" % (request_str, reply_str))
"dmitry.belous89@gmail.com"
] | dmitry.belous89@gmail.com |
5f54a5d873a44447c0c7928baedb8afbf35fa3f9 | 8b37108a288433f362311408d885a30c3e1b77ef | /bin/pip3 | 9d0727633e26fa51c61244e5eac86cf33b2f4810 | [] | no_license | dfab646/frec | 7950529dd38230c1df698aa9ec2a80f2e52c315d | fdb150129a1dc8b381b126f955a274b2058b0287 | refs/heads/main | 2023-08-29T18:48:30.903316 | 2021-11-14T02:37:01 | 2021-11-14T02:37:01 | 427,786,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | #!/Users/x/d3v/frec/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for pip inside this virtualenv.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so pip
    # reports a clean program name, then delegate to pip's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"help@google.com"
] | help@google.com | |
030305ab4f62d57c7c891748c1539069d222cb70 | 35b5f50626d33b17f38f311e9d5fc6b727d25765 | /gui/kivy/uix/dialogs/invoices.py | 8904edf6fbc412e1c895baa64e3ebd02c67d15b9 | [
"MIT"
] | permissive | bitcoinpostquantum/electrumpq | 7fcf5a3fbda2b05f033340ba61fc23e46997f5ed | dbbc2a493aff904923cd8112fc5bb07802df272c | refs/heads/master | 2020-04-10T10:09:24.239700 | 2018-12-29T09:49:58 | 2018-12-29T09:49:58 | 160,957,005 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,810 | py | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from decimal import Decimal
Builder.load_string('''
<InvoicesLabel@Label>
#color: .305, .309, .309, 1
text_size: self.width, None
halign: 'left'
valign: 'top'
<InvoiceItem@CardItem>
requestor: ''
memo: ''
amount: ''
status: ''
date: ''
icon: 'atlas://gui/kivy/theming/light/important'
Image:
id: icon
source: root.icon
size_hint: None, 1
width: self.height *.54
mipmap: True
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.requestor
shorten: True
Widget
InvoicesLabel:
text: root.memo
color: .699, .699, .699, 1
font_size: '13sp'
shorten: True
Widget
BoxLayout:
spacing: '8dp'
height: '32dp'
orientation: 'vertical'
Widget
InvoicesLabel:
text: root.amount
font_size: '15sp'
halign: 'right'
width: '110sp'
Widget
InvoicesLabel:
text: root.status
font_size: '13sp'
halign: 'right'
color: .699, .699, .699, 1
Widget
<InvoicesDialog@Popup>
id: popup
title: _('Invoices')
BoxLayout:
id: box
orientation: 'vertical'
spacing: '1dp'
ScrollView:
GridLayout:
cols: 1
id: invoices_container
size_hint: 1, None
height: self.minimum_height
spacing: '2dp'
padding: '12dp'
''')
from kivy.properties import BooleanProperty
from electrumpq_gui.kivy.i18n import _
from electrumpq.util import format_time
from electrumpq.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrumpq_gui.kivy.uix.context_menu import ContextMenu
# Human-readable label for each payment-request (PR_*) status code.
invoice_text = {
    PR_UNPAID:_('Pending'),
    PR_UNKNOWN:_('Unknown'),
    PR_PAID:_('Paid'),
    PR_EXPIRED:_('Expired')
}
# Kivy atlas icon path shown next to each payment-request status.
pr_icon = {
    PR_UNPAID: 'atlas://gui/kivy/theming/light/important',
    PR_UNKNOWN: 'atlas://gui/kivy/theming/light/important',
    PR_PAID: 'atlas://gui/kivy/theming/light/confirmed',
    PR_EXPIRED: 'atlas://gui/kivy/theming/light/close'
}
class InvoicesDialog(Factory.Popup):
    """Popup listing the wallet's invoices as cards, with a per-card
    Pay/Details/Delete context menu."""

    def __init__(self, app, screen, callback):
        Factory.Popup.__init__(self)
        self.app = app
        self.screen = screen
        # NOTE(review): `callback` is stored but never invoked in this class —
        # presumably used by callers or kv bindings; confirm before removing.
        self.callback = callback
        # key (payment-request id) -> InvoiceItem widget, reused across updates
        self.cards = {}
        self.context_menu = None

    def get_card(self, pr):
        """Return the (cached or newly created) card widget for payment request `pr`,
        refreshing its displayed requestor, memo, amount, status and date."""
        key = pr.get_id()
        ci = self.cards.get(key)
        if ci is None:
            ci = Factory.InvoiceItem()
            ci.key = key
            ci.screen = self
            self.cards[key] = ci
        ci.requestor = pr.get_requestor()
        ci.memo = pr.get_memo()
        amount = pr.get_amount()
        if amount:
            ci.amount = self.app.format_amount_and_units(amount)
            status = self.app.wallet.invoices.get_status(ci.key)
            ci.status = invoice_text[status]
            ci.icon = pr_icon[status]
        else:
            # Amountless invoice: no status text/icon to show.
            ci.amount = _('No Amount')
            ci.status = ''
        exp = pr.get_expiration_date()
        ci.date = format_time(exp) if exp else _('Never')
        return ci

    def update(self):
        """Rebuild the card list from the wallet's current invoices."""
        self.menu_actions = [('Pay', self.do_pay), ('Details', self.do_view), ('Delete', self.do_delete)]
        invoices_list = self.ids.invoices_container
        invoices_list.clear_widgets()
        _list = self.app.wallet.invoices.sorted_list()
        for pr in _list:
            ci = self.get_card(pr)
            invoices_list.add_widget(ci)

    def do_pay(self, obj):
        """Close the dialog and hand the selected payment request to the app."""
        self.hide_menu()
        self.dismiss()
        pr = self.app.wallet.invoices.get(obj.key)
        self.app.on_pr(pr)

    def do_view(self, obj):
        """Verify the selected payment request and show its details screen."""
        pr = self.app.wallet.invoices.get(obj.key)
        pr.verify(self.app.wallet.contacts)
        self.app.show_pr_details(pr.get_dict(), obj.status, True)

    def do_delete(self, obj):
        """Ask for confirmation, then remove the invoice and refresh the list."""
        from .question import Question
        def cb(result):
            if result:
                self.app.wallet.invoices.remove(obj.key)
                self.hide_menu()
                self.update()
        d = Question(_('Delete invoice?'), cb)
        d.open()

    def show_menu(self, obj):
        """Show the context menu for card `obj`, replacing any open menu."""
        self.hide_menu()
        self.context_menu = ContextMenu(obj, self.menu_actions)
        self.ids.box.add_widget(self.context_menu)

    def hide_menu(self):
        """Remove the context menu from the layout, if one is showing."""
        if self.context_menu is not None:
            self.ids.box.remove_widget(self.context_menu)
            self.context_menu = None
| [
"code@bitcoinpq.org"
] | code@bitcoinpq.org |
0e1d3724fe43681c39b138b97a5645c3c11755d4 | a5a27a8a901786d7f05f2cc5d9d67eade7d9cc65 | /main.py | c1e01892fec33d15434ce471f111fac9019b8858 | [] | no_license | 120BPM/EthereumtoUSDpy | e417bd947e707c02a0bc043d772f87fef1f9b903 | 57018671212fd89027a8a5b937870ff021341e1f | refs/heads/master | 2020-03-28T20:55:47.341765 | 2017-07-26T15:14:11 | 2017-07-26T15:14:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import requests
from pylab import show
from bs4 import BeautifulSoup
# Scrape CoinGecko's 30-day ETH/USD chart page and plot the price history.
res = requests.get('https://www.coingecko.com/zh-tw/%E5%8C%AF%E7%8E%87%E5%9C%96/%E4%BB%A5%E5%A4%AA%E5%9D%8A/usd/30%E5%A4%A9')
soup = BeautifulSoup(res.text, 'html.parser')
# The chart data is embedded as a `data-prices` HTML attribute on this div.
data_prices = soup.select('#coin_30d_historical_price_chart')[0].prettify('utf-8').decode('utf-8')
import re
m = re.search('<div data-prices="(.*?)"', data_prices)
import json
# JSON payload: list of [timestamp_ms, price] pairs.
jd = json.loads(m.group(1))
#jd
import pandas
df = pandas.DataFrame(jd)
#df
df.columns = ['datetime', 'USD']
# Timestamps are epoch milliseconds.
df['datetime'] = pandas.to_datetime(df['datetime'], unit='ms')
#df.head()
df.index = df['datetime']
df['USD'].plot(kind = 'line', figsize = [10,5])
show()
"noreply@github.com"
] | noreply@github.com |
13f357a7fd6b81224cd374d8f27073af42d95e2c | d23661e49d16736ce8f669b4439c7209a02ca858 | /sentiment/sentiment_analyzer.py | ea53b3d3652d3b7a4b7f9948a96945f2c7190ec5 | [] | no_license | jinghul/viral | 2585dcc1dc89239d35066a84cd601d4cab548a42 | 56a3b42fbb40b27b6b8049f57ad075223268b292 | refs/heads/master | 2020-05-01T04:06:27.666247 | 2019-04-09T05:28:57 | 2019-04-09T05:28:57 | 177,264,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,928 | py | # coding: utf-8
# Author: Lizi Liao, Liangming Pan, originaly by C.J. Hutto
# For license information, see LICENSE.TXT
"""
If you use the VADER sentiment analysis tools, please cite:
Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
Sentiment Analysis of Social Media Text. Eighth International Conference on
Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
"""
import io
import os
import numpy as np
from sklearn.model_selection import KFold
import math
import re
import string
import requests
import json
from itertools import product
from inspect import getsourcefile
from os.path import abspath, join, dirname
from sklearn.metrics import classification_report, precision_score, recall_score
##Constants##
# (empirically derived mean sentiment intensity rating increase for booster words)
B_INCR = 0.293
B_DECR = -0.293
# (empirically derived mean sentiment intensity rating increase for using
# ALLCAPs to emphasize a word)
C_INCR = 0.733
N_SCALAR = -0.74
# for removing punctuation
REGEX_REMOVE_PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))
PUNC_LIST = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
NEGATE = \
["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
# booster/dampener 'intensifiers' or 'degree adverbs'
# http://en.wiktionary.org/wiki/Category:English_degree_adverbs
BOOSTER_DICT = \
{"absolutely": B_INCR, "amazingly": B_INCR, "awfully": B_INCR, "completely": B_INCR, "considerably": B_INCR,
"decidedly": B_INCR, "deeply": B_INCR, "effing": B_INCR, "enormously": B_INCR,
"entirely": B_INCR, "especially": B_INCR, "exceptionally": B_INCR, "extremely": B_INCR,
"fabulously": B_INCR, "flipping": B_INCR, "flippin": B_INCR,
"fricking": B_INCR, "frickin": B_INCR, "frigging": B_INCR, "friggin": B_INCR, "fully": B_INCR, "fucking": B_INCR,
"greatly": B_INCR, "hella": B_INCR, "highly": B_INCR, "hugely": B_INCR, "incredibly": B_INCR,
"intensely": B_INCR, "majorly": B_INCR, "more": B_INCR, "most": B_INCR, "particularly": B_INCR,
"purely": B_INCR, "quite": B_INCR, "really": B_INCR, "remarkably": B_INCR,
"so": B_INCR, "substantially": B_INCR,
"thoroughly": B_INCR, "totally": B_INCR, "tremendously": B_INCR,
"uber": B_INCR, "unbelievably": B_INCR, "unusually": B_INCR, "utterly": B_INCR,
"very": B_INCR,
"almost": B_DECR, "barely": B_DECR, "hardly": B_DECR, "just enough": B_DECR,
"kind of": B_DECR, "kinda": B_DECR, "kindof": B_DECR, "kind-of": B_DECR,
"less": B_DECR, "little": B_DECR, "marginally": B_DECR, "occasionally": B_DECR, "partly": B_DECR,
"scarcely": B_DECR, "slightly": B_DECR, "somewhat": B_DECR,
"sort of": B_DECR, "sorta": B_DECR, "sortof": B_DECR, "sort-of": B_DECR}
# check for special case idioms using a sentiment-laden keyword known to VADER
SPECIAL_CASE_IDIOMS = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
"cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
##Static methods##
def negated(input_words, include_nt=True):
    """
    Determine if input contains negation words
    """
    # Any word from the NEGATE list marks the input as negated.
    for candidate in NEGATE:
        if candidate in input_words:
            return True
    # Optionally treat "n't" contractions (e.g. "wasn't") as negations too.
    if include_nt:
        if any("n't" in token for token in input_words):
            return True
    # "least" negates unless it is part of the phrase "at least".
    if "least" in input_words:
        idx = input_words.index("least")
        if idx > 0 and input_words[idx - 1] != "at":
            return True
    return False
def normalize(score, alpha=15):
    """
    Normalize the score to be between -1 and 1 using an alpha that
    approximates the max expected value
    """
    scaled = score / math.sqrt((score * score) + alpha)
    # Defensive clamp; mathematically the formula already stays in (-1, 1).
    return max(-1.0, min(1.0, scaled))
def allcap_differential(words):
    """
    Check whether just some words in the input are ALL CAPS

    :param list words: The words to inspect
    :returns: `True` if some but not all items in `words` are ALL CAPS
    """
    upper_count = sum(1 for w in words if w.isupper())
    non_upper = len(words) - upper_count
    # True only for a genuine mix: at least one ALL-CAPS word and at
    # least one non-ALL-CAPS word.
    return 0 < non_upper < len(words)
def scalar_inc_dec(word, valence, is_cap_diff):
    """
    Check if the preceding words increase, decrease, or negate/nullify the
    valence

    Returns 0.0 for non-booster words; otherwise the booster's increment
    (B_INCR/B_DECR), sign-flipped for negative valence and amplified by
    C_INCR when the booster itself is ALL CAPS in mixed-case text.
    """
    scalar = 0.0
    word_lower = word.lower()
    if word_lower in BOOSTER_DICT:
        scalar = BOOSTER_DICT[word_lower]
        # Boosters push away from zero in the direction of the valence.
        if valence < 0:
            scalar *= -1
        # check if booster/dampener word is in ALLCAPS (while others aren't)
        if word.isupper() and is_cap_diff:
            if valence > 0:
                scalar += C_INCR
            else:
                scalar -= C_INCR
    return scalar
def map_to_label(scores):
    """Map compound scores to class labels: 0 = negative (<= -0.5),
    1 = neutral (between -0.5 and 0.5 exclusive), 2 = positive (>= 0.5)."""
    labels = []
    for s in scores:
        if s >= 0.5:
            labels.append(2)
        elif s <= -0.5:
            labels.append(0)
        else:
            labels.append(1)
    return labels
class SentiText(object):
    """
    Identify sentiment-relevant string-level properties of input text.
    """

    def __init__(self, text):
        if not isinstance(text, str):
            # Coerce non-str input to a string representation of its bytes.
            text = str(text.encode('utf-8'))
        self.text = text
        self.words_and_emoticons = self._words_and_emoticons()
        # doesn't separate words from adjacent punctuation
        # (keeps emoticons & contractions)
        self.is_cap_diff = allcap_differential(self.words_and_emoticons)

    def _words_plus_punc(self):
        """
        Returns mapping of form:
        {
            'cat,': 'cat',
            ',cat': 'cat',
        }
        i.e. every word with one leading or trailing punctuation mark,
        mapped back to the bare word.
        """
        no_punc_text = REGEX_REMOVE_PUNCTUATION.sub('', self.text)
        # removes punctuation (but loses emoticons & contractions)
        words_only = no_punc_text.split()
        # remove singletons
        words_only = set(w for w in words_only if len(w) > 1)
        # the product gives ('cat', ',') and (',', 'cat')
        punc_before = {''.join(p): p[1] for p in product(PUNC_LIST, words_only)}
        punc_after = {''.join(p): p[0] for p in product(words_only, PUNC_LIST)}
        words_punc_dict = punc_before
        words_punc_dict.update(punc_after)
        return words_punc_dict

    def _words_and_emoticons(self):
        """
        Removes leading and trailing puncutation
        Leaves contractions and most emoticons
        Does not preserve punc-plus-letter emoticons (e.g. :D)

        NOTE(review): only a single punctuation mark is stripped per word
        (the mapping covers one leading OR trailing mark) — confirm that
        multi-punctuation tokens are intended to keep their extra marks.
        """
        wes = self.text.split()
        words_punc_dict = self._words_plus_punc()
        wes = [we for we in wes if len(we) > 1]
        for i, we in enumerate(wes):
            if we in words_punc_dict:
                wes[i] = words_punc_dict[we]
        return wes
class SentimentIntensityAnalyzer(object):
"""
Give a sentiment intensity score to sentences.
"""
def __init__(self, lexicon_file="sentiment_lexicon.txt", emoji_lexicon="emoji_utf8_lexicon.txt"):
_this_module_file_path_ = abspath(getsourcefile(lambda: 0))
lexicon_full_filepath = join(dirname(_this_module_file_path_), lexicon_file)
with io.open(lexicon_full_filepath, encoding='utf-8') as f:
self.lexicon_full_filepath = f.read()
self.lexicon = self.make_lex_dict()
emoji_full_filepath = join(os.path.dirname(_this_module_file_path_), emoji_lexicon)
with open(emoji_full_filepath, encoding='utf-8') as f:
self.emoji_full_filepath = f.read()
self.emojis = self.make_emoji_dict()
def make_lex_dict(self):
"""
Convert lexicon file to a dictionary
"""
lex_dict = {}
for line in self.lexicon_full_filepath.split('\n'):
(word, measure) = line.strip().split('\t')[0:2]
lex_dict[word] = float(measure)
return lex_dict
def make_emoji_dict(self):
"""
Convert emoji lexicon file to a dictionary
"""
emoji_dict = {}
for line in self.emoji_full_filepath.split('\n'):
(emoji, description) = line.strip().split('\t')[0:2]
emoji_dict[emoji] = description
return emoji_dict
def polarity_scores(self, text):
"""
Return a float for sentiment strength based on the input text.
Positive values are positive valence, negative value are negative
valence.
"""
# convert emojis to their textual descriptions
text_token_list = text.split()
text_no_emoji_lst = []
for token in text_token_list:
if token in self.emojis:
# get the textual description
description = self.emojis[token]
text_no_emoji_lst.append(description)
else:
text_no_emoji_lst.append(token)
text = " ".join(x for x in text_no_emoji_lst)
sentitext = SentiText(text)
#text, words_and_emoticons, is_cap_diff = self.preprocess(text)
sentiments = []
words_and_emoticons = sentitext.words_and_emoticons
for item in words_and_emoticons:
valence = 0
i = words_and_emoticons.index(item)
if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and
words_and_emoticons[i + 1].lower() == "of") or \
item.lower() in BOOSTER_DICT:
sentiments.append(valence)
continue
sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)
sentiments = self._but_check(words_and_emoticons, sentiments)
valence_dict = self.score_valence(sentiments, text)
return valence_dict
def sentiment_valence(self, valence, sentitext, item, i, sentiments):
is_cap_diff = sentitext.is_cap_diff
words_and_emoticons = sentitext.words_and_emoticons
item_lowercase = item.lower()
pos_count, neg_count, total_count = 0, 0, 0
if item_lowercase in self.lexicon:
# get the sentiment valence
valence = self.lexicon[item_lowercase]
if valence > 0:
pos_count += 1
elif valence < 0:
neg_count += 1
total_count += 1
# check if sentiment laden word is in ALL CAPS (while others aren't)
if item.isupper() and is_cap_diff:
if valence > 0:
valence += C_INCR
else:
valence -= C_INCR
for start_i in range(0, 3):
if i > start_i and words_and_emoticons[i - (start_i + 1)].lower() not in self.lexicon:
# dampen the scalar modifier of preceding words and emoticons
# (excluding the ones that immediately preceed the item) based
# on their distance from the current item.
s = scalar_inc_dec(words_and_emoticons[i - (start_i + 1)], valence, is_cap_diff)
if start_i == 1 and s != 0:
s = s * 0.95
if start_i == 2 and s != 0:
s = s * 0.9
valence = valence + s
valence = self._never_check(valence, words_and_emoticons, start_i, i)
if start_i == 2:
valence = self._idioms_check(valence, words_and_emoticons, i)
# future work: consider other sentiment-laden idioms
# other_idioms =
# {"back handed": -2, "blow smoke": -2, "blowing smoke": -2,
# "upper hand": 1, "break a leg": 2,
# "cooking with gas": 2, "in the black": 2, "in the red": -2,
# "on the ball": 2,"under the weather": -2}
valence = self._least_check(valence, words_and_emoticons, i)
sentiments.append(valence)
return sentiments
def _least_check(self, valence, words_and_emoticons, i):
# check for negation case using "least"
if i > 1 and words_and_emoticons[i - 1].lower() not in self.lexicon \
and words_and_emoticons[i - 1].lower() == "least":
if words_and_emoticons[i - 2].lower() != "at" and words_and_emoticons[i - 2].lower() != "very":
valence = valence * N_SCALAR
elif i > 0 and words_and_emoticons[i - 1].lower() not in self.lexicon \
and words_and_emoticons[i - 1].lower() == "least":
valence = valence * N_SCALAR
return valence
def _but_check(self, words_and_emoticons, sentiments):
# check for modification in sentiment due to contrastive conjunction 'but'
if 'but' in words_and_emoticons or 'BUT' in words_and_emoticons:
try:
bi = words_and_emoticons.index('but')
except ValueError:
bi = words_and_emoticons.index('BUT')
for sentiment in sentiments:
si = sentiments.index(sentiment)
if si < bi:
sentiments.pop(si)
sentiments.insert(si, sentiment * 0.5)
elif si > bi:
sentiments.pop(si)
sentiments.insert(si, sentiment * 1.5)
return sentiments
def _idioms_check(self, valence, words_and_emoticons, i):
onezero = "{0} {1}".format(words_and_emoticons[i - 1], words_and_emoticons[i])
twoonezero = "{0} {1} {2}".format(words_and_emoticons[i - 2],
words_and_emoticons[i - 1], words_and_emoticons[i])
twoone = "{0} {1}".format(words_and_emoticons[i - 2], words_and_emoticons[i - 1])
threetwoone = "{0} {1} {2}".format(words_and_emoticons[i - 3],
words_and_emoticons[i - 2], words_and_emoticons[i - 1])
threetwo = "{0} {1}".format(words_and_emoticons[i - 3], words_and_emoticons[i - 2])
sequences = [onezero, twoonezero, twoone, threetwoone, threetwo]
for seq in sequences:
if seq in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[seq]
break
if len(words_and_emoticons) - 1 > i:
zeroone = "{0} {1}".format(words_and_emoticons[i], words_and_emoticons[i + 1])
if zeroone in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[zeroone]
if len(words_and_emoticons) - 1 > i + 1:
zeroonetwo = "{0} {1} {2}".format(words_and_emoticons[i], words_and_emoticons[i + 1], words_and_emoticons[i + 2])
if zeroonetwo in SPECIAL_CASE_IDIOMS:
valence = SPECIAL_CASE_IDIOMS[zeroonetwo]
# check for booster/dampener bi-grams such as 'sort of' or 'kind of'
if threetwo in BOOSTER_DICT or twoone in BOOSTER_DICT:
valence = valence + B_DECR
return valence
def _never_check(self, valence, words_and_emoticons, start_i, i):
if start_i == 0:
if negated([words_and_emoticons[i - 1]]):
valence = valence * N_SCALAR
if start_i == 1:
if words_and_emoticons[i - 2] == "never" and\
(words_and_emoticons[i - 1] == "so" or
words_and_emoticons[i - 1] == "this"):
valence = valence * 1.5
elif negated([words_and_emoticons[i - (start_i + 1)]]):
valence = valence * N_SCALAR
if start_i == 2:
if words_and_emoticons[i - 3] == "never" and \
(words_and_emoticons[i - 2] == "so" or words_and_emoticons[i - 2] == "this") or \
(words_and_emoticons[i - 1] == "so" or words_and_emoticons[i - 1] == "this"):
valence = valence * 1.25
elif negated([words_and_emoticons[i - (start_i + 1)]]):
valence = valence * N_SCALAR
return valence
def _punctuation_emphasis(self, sum_s, text):
# add emphasis from exclamation points and question marks
ep_amplifier = self._amplify_ep(text)
qm_amplifier = self._amplify_qm(text)
punct_emph_amplifier = ep_amplifier + qm_amplifier
return punct_emph_amplifier
def _amplify_ep(self, text):
# check for added emphasis resulting from exclamation points (up to 4 of them)
ep_count = text.count("!")
if ep_count > 4:
ep_count = 4
# (empirically derived mean sentiment intensity rating increase for
# exclamation points)
ep_amplifier = ep_count * 0.292
return ep_amplifier
def _amplify_qm(self, text):
# check for added emphasis resulting from question marks (2 or 3+)
qm_count = text.count("?")
qm_amplifier = 0
if qm_count > 1:
if qm_count <= 3:
# (empirically derived mean sentiment intensity rating increase for
# question marks)
qm_amplifier = qm_count * 0.18
else:
qm_amplifier = 0.96
return qm_amplifier
def _sift_sentiment_scores(self, sentiments):
# want separate positive versus negative sentiment scores
pos_sum = 0.0
neg_sum = 0.0
neu_count = 0
for sentiment_score in sentiments:
if sentiment_score > 0:
pos_sum += (float(sentiment_score) + 1) # compensates for neutral words that are counted as 1
if sentiment_score < 0:
neg_sum += (float(sentiment_score) - 1) # when used with math.fabs(), compensates for neutrals
if sentiment_score == 0:
neu_count += 1
return pos_sum, neg_sum, neu_count
    def score_valence(self, sentiments, text):
        """Aggregate per-token valences into the final polarity scores.

        Returns a dict with "neg"/"neu"/"pos" ratios (rounded to 3 places)
        and the punctuation-adjusted, normalized "compound" score (4 places).
        """
        if sentiments:
            sum_s = float(sum(sentiments))
            # compute and add emphasis from punctuation in text
            punct_emph_amplifier = self._punctuation_emphasis(sum_s, text)
            # Emphasis pushes the compound score away from zero, in the
            # direction of the current overall sentiment.
            if sum_s > 0:
                sum_s += punct_emph_amplifier
            elif sum_s < 0:
                sum_s -= punct_emph_amplifier
            compound = normalize(sum_s)
            # discriminate between positive, negative and neutral sentiment scores
            pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments)
            # The punctuation emphasis is also credited to whichever side
            # (positive or negative) currently dominates.
            if pos_sum > math.fabs(neg_sum):
                pos_sum += (punct_emph_amplifier)
            elif pos_sum < math.fabs(neg_sum):
                neg_sum -= (punct_emph_amplifier)
            total = pos_sum + math.fabs(neg_sum) + neu_count
            pos = math.fabs(pos_sum / total)
            neg = math.fabs(neg_sum / total)
            neu = math.fabs(neu_count / total)
        else:
            # No scored tokens at all: everything is neutral/zero.
            compound = 0.0
            pos = 0.0
            neg = 0.0
            neu = 0.0
        sentiment_dict = \
            {"neg": round(neg, 3),
             "neu": round(neu, 3),
             "pos": round(pos, 3),
             "compound": round(compound, 4)}
        return sentiment_dict
| [
"jinghul@bu.edu"
] | jinghul@bu.edu |
4e1ffae035fb68c5edc85446876eba2211d053af | 1edc64938d4587200ad6ffba3f3c433a265d5a12 | /custom_components/aarlo/pyaarlo/media.py | 8f4991430732b69ff315abfa19ebf1cc5a7c49aa | [] | no_license | collse/Home-AssistantConfig | ebf32a18ee96822ab1b80d8f58e92073c282d3d4 | e4a1bc6ee3c470619e4169ac903b88f5dad3b6a8 | refs/heads/master | 2021-01-21T20:56:08.155545 | 2019-09-07T02:29:58 | 2019-09-07T02:29:58 | 93,457,549 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,547 | py |
import time
import threading
from datetime import datetime
from datetime import timedelta
from custom_components.aarlo.pyaarlo.constant import ( LIBRARY_URL,
PRELOAD_DAYS )
from custom_components.aarlo.pyaarlo.util import ( arlotime_strftime,
arlotime_to_datetime,
http_stream,
http_get )
class ArloMediaLibrary(object):
    """Arlo Library Media module implementation.

    Keeps an in-memory list of ArloVideo objects fetched from the Arlo
    backend; all shared state is guarded by self._lock, and pending
    update callbacks are queued in self._load_cbs_.
    """
    def __init__( self,arlo,max_days ):
        # NOTE(review): max_days is accepted but never used — confirm
        # whether it was meant to bound load()/update() date ranges.
        self._arlo = arlo
        self._lock = threading.Lock()
        self._load_cbs_ = []
        self._count = 0
        self._videos = []
        self._video_keys = []
    def __repr__(self):
        return "<{0}:{1}>".format( self.__class__.__name__,self._arlo.name )
    # grab recordings from last day, add to existing library if not there
    def update( self ):
        """Fetch today's recordings, merge new ones in, then run queued callbacks."""
        self._arlo.debug( 'updating image library' )
        # grab today's images
        date_to = datetime.today().strftime('%Y%m%d')
        data = self._arlo._be.post( LIBRARY_URL,{ 'dateFrom':date_to,'dateTo':date_to } )
        # get current videos
        with self._lock:
            keys = self._video_keys
        # add in new images
        videos = []
        for video in data:
            # skip not video
            if not video.get('contentType','').startswith('video/'):
                continue
            camera = self._arlo.lookup_camera_by_id( video.get('deviceId') )
            if not camera:
                continue
            key = '{0}:{1}'.format( video.get('deviceId'), arlotime_strftime( video.get('localCreatedDate' ) ) )
            if key in keys:
                #self._arlo.debug( 'skipping {0}, already present'.format( key ) )
                continue
            self._arlo.debug( 'adding {0}'.format( key ) )
            videos.append( ArloVideo(video,camera,self._arlo) )
            keys.append( key )
        # note changes and run callbacks
        cbs = []
        with self._lock:
            self._count += 1
            self._videos = videos + self._videos
            self._video_keys = keys
            self._arlo.debug( 'ml:update-count=' + str(self._count) )
            cbs = self._load_cbs_
            self._load_cbs_ = []
        # run callbacks with no locks held
        for cb in cbs:
            cb()
    def load( self,days=PRELOAD_DAYS ):
        """Replace the library with recordings from the last *days* days."""
        self._arlo.debug( 'loading image library' )
        # set begining and end
        now = datetime.today()
        date_from = (now - timedelta(days=days)).strftime('%Y%m%d')
        date_to = now.strftime('%Y%m%d')
        # save videos for cameras we know about
        data = self._arlo._be.post( LIBRARY_URL,{ 'dateFrom':date_from,'dateTo':date_to } )
        videos = []
        keys = []
        for video in data:
            # skip not video
            if not video.get('contentType','').startswith('video/'):
                continue
            camera = self._arlo.lookup_camera_by_id( video.get('deviceId') )
            if camera is not None:
                key = '{0}:{1}'.format( video.get('deviceId'), arlotime_strftime( video.get('localCreatedDate' ) ) )
                self._arlo.debug( 'adding {0}'.format( key ) )
                videos.append(ArloVideo(video, camera, self._arlo))
                keys.append( key )
        # set update count, load() never runs callbacks
        with self._lock:
            self._count += 1
            self._videos = videos
            self._video_keys = keys
            self._arlo.debug( 'ml:load-count=' + str(self._count) )
    @property
    def videos( self ):
        """Tuple of (update count, list of all ArloVideo objects)."""
        with self._lock:
            return ( self._count,self._videos )
    @property
    def count( self ):
        """Number of load/update cycles performed so far."""
        with self._lock:
            return self._count
    def videos_for( self,camera ):
        """Tuple of (update count, videos recorded by *camera*)."""
        camera_videos = []
        with self._lock:
            for video in self._videos:
                if camera.device_id == video.camera.device_id:
                    camera_videos.append( video )
            return ( self._count,camera_videos )
    def queue_update( self,cb ):
        """Register *cb* to run after the next update; schedule one if needed."""
        with self._lock:
            if not self._load_cbs_:
                self._arlo.debug( 'queueing image library update' )
                self._arlo._bg.run_low_in( self.update,2 )
            self._load_cbs_.append( cb )
class ArloVideo(object):
    """A single recorded clip from the Arlo media library.

    Wraps the raw attribute dictionary returned by the Arlo backend and
    exposes the interesting fields as read-only properties.
    """
    def __init__(self, attrs, camera, arlo):
        self._arlo = arlo
        self._attrs = attrs
        self._camera = camera
    def _attr(self, key):
        # Single point of access to the raw payload; missing keys yield None.
        return self._attrs.get(key, None)
    def __repr__(self):
        """Representation string of object."""
        return "<{0}:{1}>".format(type(self).__name__, self.name)
    @property
    def name(self):
        # "<camera id>:<local creation time>" — unique per clip.
        return "{0}:{1}".format(self._camera.device_id, arlotime_strftime(self.created_at))
    # pylint: disable=invalid-name
    @property
    def id(self):
        return self._attr('name')
    @property
    def created_at(self):
        return self._attr('localCreatedDate')
    def created_at_pretty(self, date_format=None):
        """Creation time as text; a truthy *date_format* overrides the default layout."""
        return (arlotime_strftime(self.created_at, date_format=date_format)
                if date_format else arlotime_strftime(self.created_at))
    @property
    def created_today(self):
        """True when the clip was recorded on the current local date."""
        return self.datetime.date() == datetime.today().date()
    @property
    def datetime(self):
        return arlotime_to_datetime(self.created_at)
    @property
    def content_type(self):
        return self._attr('contentType')
    @property
    def camera(self):
        return self._camera
    @property
    def media_duration_seconds(self):
        return self._attr('mediaDurationSecond')
    @property
    def triggered_by(self):
        return self._attr('reason')
    @property
    def object_type(self):
        return self._attr('objCategory')
    @property
    def object_region(self):
        return self._attr('objRegion')
    @property
    def thumbnail_url(self):
        return self._attr('presignedThumbnailUrl')
    @property
    def video_url(self):
        return self._attr('presignedContentUrl')
    def download_thumbnail(self, filename=None):
        return http_get(self.thumbnail_url, filename)
    def download_video(self, filename=None):
        return http_get(self.video_url, filename)
    @property
    def stream_video(self):
        return http_stream(self.video_url)
# vim:sw=4:ts=4:et:
| [
"notinuse@protonmail.com"
] | notinuse@protonmail.com |
be66682ef42b97624bb2b639bd3491ff3da7dcce | 7aa32138511354d0d741b438b43aeaf7ed603575 | /7_24/338_CountingBits.py | 48e7f75cafef32e0275aa05dccc3f93d85fface9 | [] | no_license | chuxuanyu/LeetCode-Exercise | ceadd5c583373095cc06b545d6cec6c952d89707 | 557013db92275f0de51417dbb8c2f341744b00f9 | refs/heads/master | 2020-03-23T20:29:33.783184 | 2018-08-23T21:52:49 | 2018-08-23T21:52:49 | 142,044,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 20:24:37 2018
@author: chuxuan
"""
# My Own Idea
def countBits(num):
    """Return a list whose entry i is the number of set bits in i, for 0..num."""
    counts = []
    for value in range(num + 1):
        bits = 0
        while value:
            value &= value - 1  # Kernighan's trick: clear the lowest set bit
            bits += 1
        counts.append(bits)
    return counts
# A one-liner exists for counting set bits — bin(num).count('1') — but it
# does not follow the intended DP approach.
# Split the range into power-of-two groups [0-1], [2-3], [4-7], ...:
def countBits1(num):
    """Popcounts for 0..num via power-of-two groups: count(j) = 1 + count(j - 2**level)."""
    dp = [0]
    level = 0
    while True:
        for j in range(1 << level, 1 << (level + 1)):
            if j > num:
                return dp
            dp.append(1 + dp[j - (1 << level)])
        level += 1
#
def countBits2(num):
    """Popcounts for 0..num using bits(i) = bits(i >> 1) + (i & 1)."""
    dp = [0]
    for value in range(1, num + 1):
        dp.append(dp[value >> 1] + (value & 1))
    return dp
if __name__ == '__main__':
    # Smoke-test the three implementations on the same input; all three
    # should produce identical 16-element lists for num = 15.
    result = countBits(15)
    result1 = countBits1(15)
    result2 = countBits2(15)
"noreply@github.com"
] | noreply@github.com |
58932b4dc96f982643c975264b7b86881c0a7c4a | 7d95c1b8fb9f1bce96fd1dbdb014e7beaddcf819 | /calculadora09.py | f4c21361fa1ae558f92cc39f53d5d32c73750136 | [] | no_license | jchigne/trabajo | d7d4302dc019a9a2312ab4f697e850993a9d5f88 | 1ffa85ce6a6ca0e5d4c59d9963cbece8923e4b51 | refs/heads/master | 2020-09-06T23:18:55.093179 | 2019-11-09T03:36:59 | 2019-11-09T03:36:59 | 220,586,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #Calculadora nro9
# This calculator computes the "area" of a parallelepiped (comments translated
# from Spanish; the user-facing print strings are deliberately left as-is).
# Data declaration
# NOTE(review): base_mayos/base_menos are initialised here but never used —
# the computation below reads area_mayor/area_menor instead.
base_mayos,base_menos,altura3,area_paralelepipedo=0,0,0,0
# Calculation
area_mayor=13
area_menor=5
altura3=10
# NOTE(review): 13 * 5 + 10 = 75; confirm this is the intended formula — it
# is not a standard parallelepiped surface-area or volume formula.
area_paralelepipedo=(area_mayor*area_menor+altura3)
verificador=(area_paralelepipedo==75)
# Show results
print("area mayos=",area_mayor)
print("area menor=",area_menor)
print("altura=",altura3)
print("area del paralelepipedo=",area_paralelepipedo)
print("area_paralelepipedo==75",verificador)
| [
"56941409+jchigne@users.noreply.github.com"
] | 56941409+jchigne@users.noreply.github.com |
4c6a2c954bebd5078dc10063e20a25bd877fe635 | cc5f81c5cc6c3e7398cc4a664293bd491c13d85c | /primestring.py | 4a3d8a9a05ec4b26d74e128f00fc7d8445eab280 | [] | no_license | erroronline1/hello-world | 8b0785173b524809d4ced8f4b7c90438e726c74e | db90401f61c482a72f7a4d1407ac5e7080fb118c | refs/heads/master | 2021-07-14T18:29:42.458605 | 2020-05-31T12:36:24 | 2020-05-31T12:41:01 | 155,249,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """check if string is prime as in
aaaa !prime
abab !prime
abcd prime"""
# For every chunk count that evenly divides the input length, record the
# distinct substrings obtained by cutting the string into that many equal
# chunks.
i=input('type in string to check if prime: ')
data={}
for chunk in range(2,len(i)+1):
    if not len(i)%chunk:
        pos=0
        data[chunk]=[]
        for step in range(1,chunk+1):
            rchunk=i[pos:pos+len(i)//chunk]
            if rchunk not in data[chunk]:
                data[chunk].append(rchunk)
            pos+=len(i)//chunk
print(data)
# If some split produced only one distinct chunk, the input is a repetition
# of that chunk — i.e. not "prime".
prime='input is prime'
for check in data:
    if len(data[check])<2:
        prime='input is not prime and consists of %d chunks of %s'%(check,data[check])
print(prime)
| [
"noreply@github.com"
] | noreply@github.com |
d30429aba0c826c9eadd5e4d10f1e213d7fb8d61 | e385e61f1805561707360c559b8b09126c00119e | /bin/getAllHosts.py | ce6b76a86f75f4e8e5cb7826e5430bcb25d3a00f | [] | no_license | jpdborgna/faraday | 3371c66621c1abe2f40c0d9cf28fff443108cb24 | 388c0950e6057cebbd4ecd332d9023c6b120249b | refs/heads/master | 2021-01-15T12:25:44.735655 | 2014-02-12T23:49:09 | 2014-02-12T23:49:09 | 16,785,217 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Faraday Penetration Test IDE - Community Version
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
# Walk every host known to the model controller and print each hostname
# attached to any of its network interfaces.
# NOTE(review): `api` is injected by the Faraday scripting environment; this
# Python 2 script is not runnable standalone.
for host in api.__model_controller.getAllHosts():
    for i in host.getAllInterfaces():
        for h in i._hostnames:
            print h
| [
"famato@infobytesec.com"
] | famato@infobytesec.com |
191a25f5bd2a7feea63c12ff7b88fbacdc52c55d | 1ccfcaccec18b2e2c512b931fe1473eaf59905ec | /Models/Database/sequenceRepository.py | d9b4892bb164d10d32d2ff11950816beeb41af80 | [] | no_license | rafalfelczynski/dna-alignment | 7be055f855f7b1bfd601c4b36694f940a0d9701f | deb556630793d57132be0d39fd212eeb113404de | refs/heads/main | 2023-02-17T12:08:10.272197 | 2021-01-19T09:51:19 | 2021-01-19T09:51:19 | 317,194,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from typing import List
from Models.sequence import Sequence
from Models.isequenceProvider import ISequenceProvider
class SequenceRepository:
    """Thin repository facade that delegates sequence persistence to an
    ISequenceProvider implementation."""

    def __init__(self, provider: ISequenceProvider):
        # Backing store; attribute renamed from the misspelled `_provier`
        # (private to this class, so no callers are affected).
        self._provider = provider

    def writeSeq(self, seq: Sequence) -> bool:
        """Persist *seq*; returns True on success."""
        return self._provider.write(seq)

    def readSeq(self, id) -> Sequence:
        """Return the sequence stored under *id*, or an empty Sequence if absent."""
        readSeqs = self._provider.read(id)
        if len(readSeqs) > 0:
            return readSeqs[0]
        return Sequence("", "")

    def readAll(self) -> List[Sequence]:
        """Return every stored sequence."""
        return self._provider.readAll()

    def checkIfExists(self, id):
        """True if a sequence with *id* exists in the store."""
        return self._provider.checkIfExists(id)

    def deleteSeq(self, id: str) -> bool:
        """Delete the sequence stored under *id*; returns True on success."""
        return self._provider.delete(id)
| [
"zbyroslaw20@wp.pl"
] | zbyroslaw20@wp.pl |
c06a27359bd3e04103be8d2ab7bd1255946485aa | bf5d402d5031e1c145ad03594fc1903aa77f3e32 | /pro65.py | 9b2ca9f6a776d68cc0c11306b629d80d221936b7 | [] | no_license | uthraa98/guvi-probls | dbc876532304784ecc036a3399c4713d8bc607a7 | f914fff383ec80f1b766da1dc2ae0a38164e5577 | refs/heads/master | 2020-06-19T21:46:13.309608 | 2019-08-11T05:56:00 | 2019-08-11T05:56:00 | 196,885,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | a=list(map(int,input().split()))
# Hackerrank-style stdin exercise: integer lists `a` (read on the previous
# line) and `b` are consumed but never used; only the third list `c` selects
# which hard-coded answer to print.
# NOTE(review): only the inputs [12], [14] and [7] are handled — this looks
# like an incomplete "Bon Appetit" solution rather than a general one.
b=list(map(int,input().split()))
c=list(map(int,input().split()))
if c==[12]:
    print("5")
if c==[14]:
    print("3")
if c==[7]:
    print("Bon Appetit")
| [
"noreply@github.com"
] | noreply@github.com |
69c75e1d29b8db52457b3b1cbde8c44693463577 | ed3737f5be6df551b284e906131ff4b24b3fb709 | /virtual/bin/mako-render | 4c518c61ea1885c16f380c0a9b30e779bf894caa | [
"MIT"
] | permissive | OscarMugendi/Farmers-Forum | 7ebdc896ef90a4b73978513cccbc6f29d85843dd | da685eb5a22a166269bfd7db834e8bbbfb623d03 | refs/heads/main | 2023-07-17T14:22:44.190198 | 2021-08-27T10:59:37 | 2021-08-27T10:59:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/oscar/Documents/GitHub/Farmers-Forum/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
# Console entry point generated by pip for mako: normalise argv[0] (strip the
# "-script.pyw"/".exe" suffix Windows launchers append) and delegate to
# mako's command-line runner.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
| [
"87582596+OscarMugendi@users.noreply.github.com"
] | 87582596+OscarMugendi@users.noreply.github.com | |
8fe5aa22134f70dbb384f33de26b3fdf30d07a0b | c0bc2f5a221c9782b07e119462ca2d66826b7c8b | /foods/models.py | 0292af6e7aa3b8d721d11cc0bde1474a71e06b3d | [] | no_license | dfrogoso/capstone | 660fe3afdd2ac1a80c0da6ea214f9157839f8fa0 | 9b1aa2478f9cd0b070dcbee33a0a3d8c92dd8e9c | refs/heads/master | 2020-07-28T00:24:49.178785 | 2019-09-20T08:22:02 | 2019-09-20T08:22:02 | 209,254,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from django.db import models
from django.contrib.auth.models import User
class Food(models.Model):
    """A food item offered for sale, posted by an authenticated user."""
    title = models.CharField(max_length=40)
    description = models.TextField()
    image = models.ImageField(upload_to='images/')
    price = models.IntegerField(blank=False)  # whole-unit price, no decimals
    # Owner of the post; deleting the user cascades to their food items.
    posted_by = models.ForeignKey(User, on_delete=models.CASCADE, editable=False)
    created = models.DateTimeField(auto_now_add=True, editable=False)  # set once on insert
    modified = models.DateTimeField(auto_now=True)  # refreshed on every save
    def __str__(self):
        # Human-readable label (e.g. in the admin): "<title> - <price>".
        return "{} - {}".format(self.title, self.price)
| [
"idominick19@gmail.com"
] | idominick19@gmail.com |
d671f49d5bf51d6b819c2844d458b42b7ada6c94 | 17cad1d357380875243b804ffd13882f1a7d61a8 | /0x0B-python-input_output/2-read_lines.py | f33d4e7521358ff0ab260754b7aa8efa81a6ae2c | [] | no_license | Abdou-Hidoussi/holbertonschool-higher_level_programming | 9a0c0714b63ccd9823798adb51eb4f395ab375dc | 1dd37cc5f848d1f37884e6ffbe9598eae8c4f30e | refs/heads/master | 2023-03-05T00:02:25.283646 | 2021-02-18T20:42:26 | 2021-02-18T20:42:26 | 291,713,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | #!/usr/bin/python3
"""
task 2
"""
def read_lines(filename="", nb_lines=0):
    """Print the first *nb_lines* lines of a UTF-8 text file.

    A non-positive *nb_lines* prints the whole file instead.
    """
    with open(filename, encoding="utf-8") as stream:
        if nb_lines <= 0:
            print(stream.read(), end="")
        else:
            for _ in range(nb_lines):
                print(stream.readline(), end="")
| [
"hidoussiabdou5@gmail.com"
] | hidoussiabdou5@gmail.com |
d6ceab4c096c9e82c76db78bbc3bef40daa0f911 | 1edb6f6a32fa379dca9ab77dfb17b4e3f2c04615 | /python_2_ATM/ATM.py | 3cd24fa20717cd5192506cdbc0e9d738fd4d0e54 | [] | no_license | catoolashora/solvedatbook | 1843212fc57c1e42b1fa1b8296e7723e157234c4 | 06c91f56cae3158a3159bc73ff861eed1b038f7a | refs/heads/master | 2020-04-14T12:11:54.336216 | 2019-01-17T15:47:01 | 2019-01-17T15:47:01 | 163,833,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,897 | py | data_filename = "data"
# Load the client database from the data file named above.  Each line is
# expected to be "<id> <name> <password> <balance> ...": the id keys the
# record and the next three fields are kept as a list.
clients_information_file = open(data_filename, "r")
clients_information = clients_information_file.readlines()
parsed_client_information = []
clients = {}
for client in clients_information:
    parsed_client_information = client.split(" ")[:4]
    clients[parsed_client_information[0]] = parsed_client_information[1:]
# Balances (field index 2 of each record) are stored as floats.
for client in clients:
    clients[client][2] = float(clients[client][2])
# Console prompts and the menu shown to the user.
greeting_message = "Welcome to StealYourMoney Ltd. Inc.\r\nID: "
wrong_id_message = "I donno ya'\r\nID: "
instructions = []
instructions.append("now what?")
instructions.append("q - QUIT")
instructions.append("h - how much we own from you")
instructions.append("g - give us money")
instructions.append("t - take money (give us your soul)")
instructions.append("c - change password")
# Session state shared by the functions below via `global`.
password = ""
user_id = "0"
action = ""
def print_instructions():
    """Print the menu of available commands, one option per line."""
    for line in instructions:
        print(line)
def login():
    """Prompt for an ID and password, then enter the command loop.

    Entering "-1" as the ID skips login so the outer loop can terminate.
    """
    global user_id
    global clients
    user_id = input(greeting_message)
    if(user_id != "-1"):
        while(not user_id in clients):
            user_id = input(wrong_id_message)
        # Field 1 of a client record is the stored (plain-text) password.
        password = input("password: ")
        while(not clients[user_id][1] == password):
            password = input("NO.\r\nPassword: ")
        print("welcome " + clients[user_id][0])
        action()
def how_much_money():
    # Show the balance (field 2) of the currently logged-in client.
    print(clients[user_id][2])
def take_money():
    """Withdraw a non-negative amount from the current client's balance."""
    global clients
    ammount = float(input("HOW MUCH: "))
    while (ammount < 0):
        print("NO tricksssss")
        ammount = float(input("HOW MUCH: "))
    # NOTE(review): no overdraft check — the balance can go negative here.
    clients[user_id][2] -= ammount
def give_money():
    """Deposit a non-negative amount into the current client's balance."""
    global clients
    ammount = float(input("HOW MUCH: "))
    while (ammount < 0):
        print("NO tricksssss")
        ammount = float(input("HOW MUCH: "))
    clients[user_id][2] += ammount
def change_password():
    # Replace the stored password (field 1) for the logged-in client.
    new_password = input("New password: ")
    clients[user_id][1] = new_password
# Dispatch table mapping a one-letter menu command to its handler.
actions = {}
actions["h"] = how_much_money
actions["t"] = take_money
actions["g"] = give_money
actions["c"] = change_password
def action():
    """Read menu commands and dispatch them until the user enters "q".

    Bug fix: an unknown command used to print "NO." and then still index
    the dispatch table, crashing with a KeyError; it is now rejected only.
    """
    print_instructions()
    choice = input("WAHT: ")
    while(choice != "q"):
        if(not choice in actions):
            print("NO.")
        else:
            actions[choice]()
        print_instructions()
        choice = input("WAHT: ")
# Keep offering logins until a user enters "-1" as the ID.
while(user_id != "-1"):
    login()
print("bye")
clients_information_file.close()
# Serialise the (possibly modified) client records back to the data file,
# one space-separated line per client.
new_clinet_informatio_data = ""
current_client_data = ""
for client in clients:
    current_client_data = client + " "
    for data_field in clients[client]:
        current_client_data += str(data_field) + " "
    new_clinet_informatio_data += current_client_data + "\n"
clients_information_file = open(data_filename, "w")
clients_information_file.write(new_clinet_informatio_data)
| [
"Nir@Snow"
] | Nir@Snow |
8418c74bdd2827a64a4ac6dba6f17b18624e030e | 19e65fe29bf8104c397b5cf4731d20a6a8985219 | /assignments/src/arXivRetriever.py | ba4597ebc235261471e876b2e8065c53575a122d | [] | no_license | sailik1991/SocialMediaAnalysis | 405508ca0dd4df0bbb62af4ef4e8f7a9e37b2fbf | 497cda8b795da6ec02ca73c0bea3c66c0f82ecae | refs/heads/master | 2021-07-10T14:36:02.833410 | 2017-10-14T22:15:53 | 2017-10-14T22:15:53 | 103,697,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | """
A python program to create and store coauthorship graphs of publised ArXiv papers.
Author: Sailik Sengupta
"""
from graph_tool.all import *
from pylab import *
import arxivscraper
import matplotlib
import datetime
import pandas
import sys
def plot_degrees(graph, date_from):
    """Plot the total-degree histogram of *graph* and save it as
    "<date_from>.pdf" and "<date_from>.png".

    vertex_hist comes from the graph_tool star import; errorbar, xlabel,
    ylabel, tight_layout and savefig come from the pylab star import.
    """
    hist = vertex_hist(graph, deg="total")
    errorbar(hist[1][:-1], hist[0], fmt="o", label="total")
    xlabel("Vertex (author) number")
    ylabel("Co-authors")
    tight_layout()
    savefig("{}.pdf".format(date_from))
    savefig("{}.png".format(date_from))
if __name__ == '__main__':
    # Both a category and a start date are required (argv[1] and argv[2]).
    # Bug fix: the old check (< 2) let a missing <start_date> crash below.
    if len(sys.argv) < 3:
        print("Please execute:\n python <codefile>.py <category_name> <start_date>")
        exit(1)
    # Set inputs for retrieving arXiv records
    category = sys.argv[1]
    date_from = sys.argv[2]
    date_until = str(datetime.date.today())
    graph_name = "graph_{}_{}.xml.gz".format(category, date_from)
    co_auth_graph = Graph(directed=False)
    try:
        co_auth_graph.load(graph_name)
    except Exception:
        # Best-effort cache: any load failure triggers a fresh scrape.
        print("[DEBUG] Graph data does not exist. Scraping ArXiv!")
        # Retrieve the records
        scraper = arxivscraper.Scraper(category=category, date_from=date_from,date_until=date_until)
        output = scraper.scrape()
        # Store it in a panda dataframe
        cols = ('id', 'title', 'categories', 'abstract', 'doi', 'created', 'updated', 'authors')
        df = pandas.DataFrame(output, columns=cols)
        # Create an adj list for authorship
        co_auth_adj_list = {}
        for author_list in df['authors']:
            for u in author_list:
                for v in author_list:
                    if not u == v:
                        # Bug fix: the old try/except created an empty list on
                        # first sight of a key WITHOUT appending v, silently
                        # dropping the first co-author of every author.
                        co_auth_adj_list.setdefault(u, []).append(v)
        # Create co-authorship graph
        auth_vtx = {}
        for a in co_auth_adj_list.keys():
            v = co_auth_graph.add_vertex()
            auth_vtx[a] = v
        for a in co_auth_adj_list.keys():
            for b in co_auth_adj_list[a]:
                co_auth_graph.add_edge(auth_vtx[a], auth_vtx[b])
        co_auth_graph.save(graph_name)
        print("[DEBUG] Saved graph- {}".format(graph_name))
    try:
        plot_degrees(co_auth_graph, date_from)
        print("[SUCCESS] Successfully created the histogram!")
    except Exception:
        print("[FAILED] Creating the histogram failed!")
"ssengu15@asu.edu"
] | ssengu15@asu.edu |
2e2d7e53843980f61f392bff1427b2dbcbdf7af2 | b62f3c45adda3ff7e273759c976216201dea8dc2 | /randomData.py | e6d35d03398cd10f29dbfe8130bfbe9c39c7958e | [] | no_license | jyosoman/FaultSim | 48bf9d1a0069c6aabd6558167a0f49446d800843 | 324696a2d7f01bfd96102ff4a042a2c05cc9bee9 | refs/heads/master | 2020-05-24T04:24:33.519986 | 2017-05-19T14:56:22 | 2017-05-19T14:56:22 | 84,820,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | import random
def randInt():
    """Return a pseudo-random float uniformly drawn from [0, 2**33).

    NOTE(review): 2 << 32 equals 2**33, not 2**32 — confirm the intended range.
    """
    return random.uniform(0, 1) * (2 << 32)
# Emit one million rows of two random integers each (Python 2 print syntax;
# `long` truncates the float samples returned by randInt()).
for i in range(1000000):
    print long(randInt()),long(randInt())
| [
"jyothish.soman@gmail.com"
] | jyothish.soman@gmail.com |
1c76e72cd8addfb1c576f4225f82d9d5ad24d572 | f64f8a8827219371236f0e2ad3d5220ec1825cb2 | /bux/_commands/_losers.py | 32efa4c463b6d14b31e50be4b8aa634548f350b3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | orsinium-labs/bux | 2b1c0c19aa06d480a90386cdda66af855a746f32 | fbb5727b759719f15ec38dd4bf00e493690854b4 | refs/heads/master | 2023-07-12T19:35:14.768488 | 2021-08-30T11:19:03 | 2021-08-30T11:19:03 | 389,935,936 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from argparse import ArgumentParser
import bux
from ._base import Command, register
@register
class Losers(Command):
    """CLI sub-command that lists the day's biggest-losing stocks on BUX."""
    name = 'losers'  # sub-command name on the command line
    @staticmethod
    def init_parser(parser: ArgumentParser) -> None:
        """Register command-line arguments; an API token is mandatory."""
        parser.add_argument('--token', required=True)
    def run(self) -> int:
        """Fetch the movers feed and print each loser with its change in %."""
        api = bux.UserAPI(token=self.args.token)
        movers = api.securities().movers().requests()
        for stock in movers.losers:
            # Percentage change of the current bid vs. the closing bid.
            gain = (stock.bid.amount / stock.closing_bid.amount - 1) * 100
            self.print(f'{stock.id} {stock.name:25} {gain:+.02f}%')
        return 0
| [
"mail@orsinium.dev"
] | mail@orsinium.dev |
2d6930ba280dc1e2664d7861215842da9bd42981 | 376f534e75765719b33f61da9bc27d082f4ab580 | /Chapter 5 codes/csvinventory | 2bf3164ac890e96637476758d9007d82f1a04828 | [] | no_license | joonp/ansible_book | 45c3f3354fb205b2dba7795c4e1469fca26b33a5 | c3ecb8f5009d12a86f866c725dd600b985d926a9 | refs/heads/master | 2021-01-23T01:07:15.464649 | 2017-09-05T03:39:57 | 2017-09-05T03:39:57 | 102,430,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import csv
import json
def getlist(csvfile):
    """Build a {group: [hosts]} mapping from CSV rows, skipping the header."""
    groups = dict()
    for index, row in enumerate(csvfile):
        # Row 0 is the header; every other row is (group, host, variables).
        if index == 0:
            continue
        (group, host, variables) = row
        # First host seen for a group creates its list.
        groups.setdefault(group, list()).append(host)
    return groups
def gethost(csvfile, host):
    """Return *host*'s variables parsed from its "k=v k=v ..." CSV column.

    The header row is ignored; None is returned when the host is absent.
    """
    for index, row in enumerate(csvfile):
        if index != 0 and row[1] == host:
            variables = dict()
            for pair in row[2].split():
                # Split on the first '=' only so values may contain '='.
                key, value = pair.split('=', 1)
                variables[key] = value
            return variables
# CLI driver implementing the Ansible dynamic-inventory protocol:
# "--list" prints the whole group->hosts inventory, "--host <h>" prints
# that host's variables.
command = sys.argv[1]
#Open the CSV and start parsing it
with open('machines.csv', 'r') as infile:
    result = dict()
    csvfile = csv.reader(infile)
    if command == '--list':
        result = getlist(csvfile)
    elif command == '--host':
        result = gethost(csvfile, sys.argv[2])
# Python 2 print statement: emit the result as JSON on stdout.
print json.dumps(result)
| [
"jame@p.com"
] | jame@p.com | |
26f7ad05e80fdca110523b238fc32b0116198fe1 | a8ebaf383288403106c2c9f2bd8d3eebb53df094 | /migrations/versions/f32336cc5e99_.py | fc6a7dcfd2c928776f3dff145bd45b11b47b3f9b | [
"MIT"
] | permissive | adyouri/lafza | 3dadfc5b4254956151b426a177d5e56b0ce846e6 | 3aa8409153c2d6b8cfc548ca950c6680e6f91a01 | refs/heads/master | 2021-10-19T08:34:36.143144 | 2019-02-19T15:37:34 | 2019-02-19T15:37:34 | 125,109,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | """empty message
Revision ID: f32336cc5e99
Revises: 4354b8fb64b7
Create Date: 2018-06-25 20:08:54.146982
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'f32336cc5e99'
down_revision = '4354b8fb64b7'
branch_labels = None
depends_on = None
def upgrade():
    """Create the translation_upvoters association table (translation <-> user)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('translation_upvoters',
    sa.Column('translation_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['translation_id'], ['translation.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('translation_id', 'user_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop translation_upvoters and relax date_created columns to nullable.

    NOTE(review): the alter_column calls have no counterpart in upgrade()
    above — presumably left over from an edited autogenerate run; confirm
    they belong to this revision.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('user', 'date_created',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    op.alter_column('translation', 'date_created',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    op.alter_column('term', 'date_created',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    op.drop_table('translation_upvoters')
    # ### end Alembic commands ###
| [
"raryat@gmail.com"
] | raryat@gmail.com |
a3424567b9fa99285159c49090adb9b95af34148 | c0fd1c6c8b12785fb594dc9f12d2b73478a10f69 | /pjt06/pjt06_movie/settings.py | f053b645cd915b3e93bd30b2fa0003bbca0e657c | [] | no_license | joohongkim1/PJT-1 | fe5c564f346df3830ba1129c6f75ce1b0b32f153 | b36f7f7561497045ad53e7a32a7a580abf615a60 | refs/heads/master | 2020-08-19T20:33:42.435771 | 2019-10-18T05:51:12 | 2019-10-18T05:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,190 | py | """
Django settings for pjt06_movie project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = '791efrxy9*abhds9aml&uz@&m!v+ikf$_4yk$gg@5(ze2te5-p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty means only localhost-style hosts are served; add host names before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'movies',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pjt06_movie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'pjt06_movie', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pjt06_movie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# STATIC_FILES
| [
"haajeesoo@gmail.com"
] | haajeesoo@gmail.com |
fa8c29d56f13ea0617e83c87a99644ad33ef1a28 | 56ea966ee1219810355c44997cb547d1f22828b2 | /test.py | 3f3624d6935ee57cae50ed2165dbcc02dbd141c7 | [] | no_license | Dadodadolontong/banold | b52a4cc400e4bc54318424e752c9b56b8bbd3099 | 0a7dbfa442fe58ab68a30177383833146550a84c | refs/heads/master | 2022-01-09T06:57:50.108244 | 2018-04-23T15:05:48 | 2018-04-23T15:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import frappe
import erpnext
def submit():
	"""Submit every draft Sales Invoice posted on or after 2017-01-01.

	Progress is printed per invoice (Python 2 print statements); validation
	failures are reported and skipped instead of aborting the run.
	"""
	i = 0
	for si in frappe.get_all("Sales Invoice", filters=[["docstatus","=",0],["posting_date",">=","2017-01-01"]]):
		si = frappe.get_doc("Sales Invoice", si.name)
		i += 1
		try:
			si.submit()
			print "{0} {1} Ok".format(i,si.name)
		except frappe.exceptions.ValidationError as e:
			print "{0} {1} Not Ok : {2}".format(i,si.name,e.args)
| [
"cahyadi.suwindra@gmail.com"
] | cahyadi.suwindra@gmail.com |
a06ebf0b2e70f7edd07d4d1201d5f5e3bdf3e7ee | 614bb41a47ac1517bf4bffad245b9844d3d0584e | /manage/manage.py | 4ac87624d60331358d8476e4c4c95ef6d237478a | [] | no_license | jnzs1836/stock-dealing-system | 109b6c565cf78757122f072f8f9a842c1a177c15 | da5ca832f3fb2863671245a673b52d9ac1ef2e16 | refs/heads/master | 2020-03-21T14:07:03.978867 | 2018-06-25T19:18:46 | 2018-06-25T19:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,672 | py | from flask import Flask, jsonify, request, render_template
from itsdangerous import URLSafeTimedSerializer, BadSignature, SignatureExpired
from flask_sqlalchemy import SQLAlchemy
import json
import time
import datetime
# from main import db
# from trading.order import change_stock_status, get_buy_sell_items, get_stock_state, set_price_limit
#
# class User(db.Model):
# __tablename__ = 'admi'
# user_id = db.Column(db.String(10), primary_key=True)
# user_password = db.Column(db.String(32))
# super = db.Column(db.Boolean)
#
#
# class StockState(db.Model):
# __tablename__ = 'stock_state'
# stock_id = db.Column(db.String(10), primary_key=True)
# status = db.Column(db.Boolean)
# gains = db.Column(db.Float(10, 2))
# decline = db.Column(db.Float(10, 2))
#
#
# class StockInfo(db.Model):
# __tablename__ = 'stock_info'
# stock_id = db.Column(db.String(10), primary_key=True)
# stock_name = db.Column(db.String(32))
# newest_price = db.Column(db.Float(10, 2))
# newest = db.Column(db.Integer)
#
#
# class UserStock(db.Model):
# __tablename__ = 'user_stock'
# id = db.Column(db.Integer, primary_key=True)
# user_id = db.Column(db.String(10))
# stock_id = db.Column(db.String(10))
#
#
# class Buy(db.Model):
# __tablename__ = 'buy'
# id = db.Column(db.Integer, primary_key=True)
# stock_id = db.Column(db.String(10))
# stock_name = db.Column(db.String(32))
# price = db.Column(db.Float(10, 2))
# time = db.Column(db.DateTime)
# share = db.Column(db.Integer)
#
#
# class Sell(db.Model):
# __tablename__ = 'sell'
# id = db.Column(db.Integer, primary_key=True)
# stock_id = db.Column(db.String(10))
# stock_name = db.Column(db.String(32))
# price = db.Column(db.Float(10, 2))
# time = db.Column(db.DateTime)
# share = db.Column(db.Integer)
#
#
# class Manager:
# def __init__(self, app, db):
# self.app = app
# self.app.db = db
#
# def get_token(self, id):
# config = self.app.config
# secret_key = config.setdefault('SECRET_KEY')
# salt = config.setdefault('SECURITY_PASSWORD_SALT')
# serializer = URLSafeTimedSerializer(secret_key)
# token = serializer.dumps(id, salt=salt)
# return token
#
# def check_token(self, token, max_age=86400):
# if token is None:
# return False
# config = self.app.config
# secret_key = config.setdefault('SECRET_KEY')
# salt = config.setdefault('SECURITY_PASSWORD_SALT')
# serializer = URLSafeTimedSerializer(secret_key)
# try:
# id = serializer.loads(token, salt=salt, max_age=max_age)
# except BadSignature:
# return False
# except SignatureExpired:
# return False
# user = User.query.filter_by(user_id=id).first()
# if user is None:
# return False
# return True
#
# def parse_token(self, token, max_age=86400):
# config = self.app.config
# secret_key = config.setdefault('SECRET_KEY')
# salt = config.setdefault('SECURITY_PASSWORD_SALT')
# serializer = URLSafeTimedSerializer(secret_key)
# id = serializer.loads(token, salt=salt, max_age=max_age)
# return id
#
# def check_password(self, user_id, old_password, new_password="12345678", confirm_password="12345678"):
# if new_password != confirm_password:
# return {'result': False, 'msg': 'confirm password fail!', 'code': 1}
# if len(new_password) > 20 or len(new_password) < 6:
# return {'result': False, 'msg': 'new password is too long or too short!', 'code': 2}
# user = User.query.filter_by(user_id=user_id).first()
# if user is None:
# return {'result': False, 'msg': 'user doesn\'t exist', 'code': 3}
# if user.user_password != old_password:
# return {'result': False, 'msg': 'wrong password', 'code': 4}
# return {'result': True, 'msg': 'reset successfully'}
#
# def reset_password(self, user_id, new_password):
# try:
# user = User.query.filter_by(user_id=user_id).first()
# user.user_password = new_password
# db.session.commit()
# except:
# return False
# return True
#
# def user_stock_auth(self, user_id, stock_id):
# user_stock = UserStock.query.filter_by(user_id=user_id, stock_id=stock_id).first()
# if user_stock is None:
# return False
# else:
# return True
#
# # def get_stock_info(stock_id):
# # stock_info = StockInfo.query.filter_by(stock_id=stock_id).first()
# # stock_state = get_stock_state(stock_id)
# # if stock_info is None or stock_state is None:
# # return {}
# # dict = {'stock_id': stock_info.stock_id, 'stock_name': stock_info.stock_name,
# # 'newest_price': float(stock_info.newest_price), 'newest': float(stock_info.newest),
# # 'status': stock_state['status'], 'gains': stock_state['gains'], 'decline': stock_state['decline']}
# # return dict
#
# # def change_stock_status(stock_id, status):
# # stock_state = StockState.query.filter_by(stock_id=stock_id).first()
# # if stock_state is None:
# # return False
# # try:
# # stock_state.status = status
# # app.db.session.commit()
# # except:
# # return False
# # return True
#
# # def set_price_limit(stock_id, price, is_gains):
# # # 这里需要一些对price的检查
# # stock_state = StockState.query.filter_by(stock_id=stock_id).first()
# # if stock_state is None:
# # return False
# # try:
# # if is_gains:
# # stock_state.gains = price
# # else:
# # stock_state.decline = price
# # app.db.session.commit()
# # except:
# # return False
# # return True
#
# # def get_buy_sell_items(stock_id, is_buy):
# # try:
# # if is_buy:
# # slist = Buy.query.filter_by(stock_id=stock_id).all()
# # else:
# # slist = Sell.query.filter_by(stock_id=stock_id).all()
# # return_list = []
# # for item in slist:
# # item_dict = {
# # 'stock_id': item.stock_id,
# # 'stock_name': item.stock_name,
# # 'price': float(item.price),
# # 'time': str(item.time),
# # 'share': item.share
# # }
# # return_list.append(item_dict)
# # return return_list
# # except Exception as e:
# # print(e)
# # return []
#
# def add_authorization(self, user_id, stock_id):
# try:
# user = User.query.filter_by(user_id=user_id).first()
# if user is None:
# return {'code': 0, 'msg': 'user does not exist'}
# stock = StockInfo.query.filter_by(stock_id=stock_id).first()
# if stock is None:
# return {'code': 0, 'msg': 'stock does not exist'}
# user_stock = UserStock.query.filter_by(user_id=user_id, stock_id=stock_id).first()
# if user_stock is not None:
# return {'code': 0, 'msg': 'authorization exist'}
# user_stock = UserStock(user_id=user_id, stock_id=stock_id)
# db.session.add(user_stock)
# db.session.commit()
# return {'code': 1, 'msg': 'success'}
# except Exception as e:
# print(e)
# return {'code': 0, 'msg': "error"}
#
# def delete_authorization(self, user_id, stock_id):
# try:
# user = User.query.filter_by(user_id=user_id).first()
# if user is None:
# return {'code': 0, 'msg': 'user does not exist'}
# stock = StockInfo.query.filter_by(stock_id=stock_id).first()
# if stock_id is None:
# return {'code': 0, 'msg': 'stock does not exist'}
# user_stock = UserStock.query.filter_by(user_id=user_id, stock_id=stock_id).first()
# if user_stock is None:
# return {'code': 0, 'msg': 'authorization does not exist'}
# db.session.delete(user_stock)
# db.session.commit()
# return {'code': 1, 'msg': 'success'}
# except Exception as e:
# print(e)
# return {'code': 0, 'msg': "error"}
| [
"wuguandejn@gmail.com"
] | wuguandejn@gmail.com |
397ddeaeef002ff5fe08436da499fb1acc87c3dd | 2875c1108d5e658b2cf8afff22f72c06054b02a5 | /project/hackathone/shop/models.py | f92695825c2c6851c5bfd696c32294c0745c0826 | [] | no_license | peanutyumyum/LOTTE_Calendar | 9da0896306610779d0c55b04cf6f78985e9cc111 | 965687cecdffe2ee292acb0b6cba5965f7d98770 | refs/heads/main | 2023-01-03T05:51:42.271412 | 2020-10-26T03:23:39 | 2020-10-26T03:23:39 | 302,361,164 | 1 | 5 | null | 2020-10-25T10:17:30 | 2020-10-08T14:07:04 | Python | UTF-8 | Python | false | false | 1,461 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250, unique=True)
description = models.TextField(blank=True)
image = models.ImageField(upload_to='category', blank=True)
class Meta:
ordering = ('name', )
verbose_name = 'category'
verbose_name_plural = 'categories'
def get_url(self):
return reverse('shop:products_by_category', args=[self.slug])
def __str__ (self):
return '{}'.format(self.name)
class Product(models.Model):
name = models.CharField(max_length=250, unique=True)
slug = models.SlugField(max_length=250,unique=True)
description = models.TextField(blank=True)
category = models.ForeignKey(Category , on_delete=models.CASCADE)
price = models.IntegerField(default=0)
image = models.ImageField(upload_to="product", blank=True)
stock = models.IntegerField()
available = models.BooleanField(default=True)
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
class Meta:
ordering = ('name', )
verbose_name = 'product'
verbose_name_plural = 'products'
def get_url(self):
return reverse('shop:ProdCatDetail', args=[self.category.slug , self.slug])
def __str__(self):
return '{}'.format(self.name) | [
"4294psy@naver.com"
] | 4294psy@naver.com |
f8de2d3ec7ab10bb09fe805c6e7428d259ba7457 | 5dfca79fe1de186b240caa9c3cf24c5565431bef | /Blink.py | 2516838a8ba66611fb0a12b73ebf0375286a19f9 | [] | no_license | ed-choi/Game | fa0ab929b33fffde89adc8acff4386fccc9f0ad7 | ca984014e07ac8de208a78114df976332fa10b3f | refs/heads/master | 2016-09-06T13:03:24.024787 | 2013-10-13T01:22:50 | 2013-10-13T01:22:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import sfml as sf
class Entity(sf.Drawable):
def __init__(self):
sf.Drawable.__init__(self)
self._transformable = sf.Transformable()
self.shape = sf.RectangleShape()
def draw(self, target, states):
states.transform.combine(self._transformable.transform)
target.draw(body)
target.draw(clothes)
def getposition(self):
return self._transfomable.position
def setposition(self, position):
self._transformable.position = position
def setsize(self,size):
self._transfolrmable.size = size
window = sf.Window(sf.VideoMode(800, 600),'Game')
window.framerate_limit = 60
ground = Entity()
ground.setposition((0,750))
while window.is_open:
for event in window.events:
if type(event) is sf.CloseEvent:
window.close()
window.active=True
window.display()
| [
"edward.choi805@gmail.com"
] | edward.choi805@gmail.com |
7d0b0da4756155d0590d0f4e129b6b3710cfc484 | 0d8d53d073f43edaf79ac51759a2ca0fdda5b289 | /catkin_ws/build/my_robot/catkin_generated/pkg.installspace.context.pc.py | d11546171322cc2d28b7046752c09f86cb91407c | [] | no_license | eogiesoba/RSE_Where_Am_I | c45783b9fdca9df17212ac9da4ad84883d200469 | 6484236b206f92e53680af5c132e8bf044df64ea | refs/heads/master | 2020-05-02T01:31:48.713215 | 2019-03-31T03:04:58 | 2019-03-31T03:04:58 | 177,687,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "my_robot"
PROJECT_SPACE_DIR = "/home/workspace/RSE_Where_Am_I/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"eogiesoba@gmail.com"
] | eogiesoba@gmail.com |
73ea9b5b72d472a84e080415378c392f17ac413d | 34c84dc28ca8c62594ba74facc6cef4eacb2aad9 | /examples/liquid-argon/utils.py | 672f312f191d433c0811262db1d2a9b8692c8079 | [] | no_license | choderalab/automatic-equilibration-detection | f4102407db312402b30dceb1cee0ea0e698e46b1 | 9b5f096cd8e309bc1158f9eed5d8fd41f78312cc | refs/heads/master | 2021-01-17T15:15:38.786759 | 2016-12-24T06:12:32 | 2016-12-24T06:12:32 | 19,412,535 | 15 | 9 | null | 2016-12-24T06:12:33 | 2014-05-03T21:28:09 | TeX | UTF-8 | Python | false | false | 4,937 | py | #!/usr/bin/env python
"""
Run a simulation of liquid argon at constant pressure.
"""
import os, os.path, copy
import netCDF4
from simtk import openmm, unit
from simtk.openmm import app
from openmmtools import testsystems, integrators
def minimize(system, positions):
"""
Minimize the specified testsystem.
Parameters
----------
system : simtk.openmm.System
The system to minimize
positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
The initial positions to be minimized.
Returns
-------
minimized_positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
Minimized positions.
"""
integrator = openmm.VerletIntegrator(1.0 * unit.femtosecond)
context = openmm.Context(system, integrator)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context)
final_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return final_positions
def write_pdb(filename, positions):
"""
Write PDB file for argon particles.
Parameters
----------
filename : str
Filename to write PDB file to.
positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
Positions to write.
"""
nparticles = positions.shape[0]
outfile = open(filename, 'w')
for particle in range(nparticles):
outfile.write("ATOM %5d AR AR 1 %8.3f%8.3f%8.3f\n" % (particle, positions[particle,0]/unit.angstrom, positions[particle,1]/unit.angstrom, positions[particle,2]/unit.angstrom))
outfile.close()
return
def create_netcdf_datastore(filename, system, positions, nreplicates, niterations, observation_interval):
"""
Create (or resume from) NetCDF data storage file.
Parameters
----------
filename : str
Filename of NetCDF file.
system : simtk.openmm.System
The system to minimize
positions : simtk.unit.Quantity of size (nparticles,3) with units compatible with angstroms
The initial positions used for all simulations
nreplicates : int
The number of simulation replicates to be performed
niterations : int
The number of simulation iterations to be performed.
obervation_interval : simtk.unit.Quantity with units compatible with ps
Observation interval between frames.
Returns
-------
ncfile : netCDF4.Dataset
"""
if os.path.exists(filename):
raise Exception("Datafile '%s' already exists." % filename)
# Create a new file.
ncfile = netCDF4.Dataset(filename, 'w', version='NETCDF4')
# Determine some extra dimensions
nparticles = positions.shape[0]
# Initialize NetCDF file.
ncfile.createDimension('replicate', 0) # unlimited number of replicates
ncfile.createDimension('iteration', 0) # unlimited number of iterations
ncfile.createDimension('atom', nparticles) # number of atoms in system
ncfile.createDimension('spatial', 3) # number of spatial dimensions
ncfile.createDimension('singleton', 1)
# Set global attributes.
import time
setattr(ncfile, 'title', 'liquid argon simulation density data')
setattr(ncfile, 'CreationDate', time.ctime())
# Store global data.
ncvar = ncfile.createVariable('observation_interval', 'f4')
ncvar.assignValue(observation_interval / unit.picoseconds)
setattr(ncvar, 'units', 'ps')
# Store initial positions.
ncvar_positions = ncfile.createVariable('initial_positions', 'f4', ('atom','spatial'), zlib=True, chunksizes=(nparticles,3))
setattr(ncvar_positions, 'units', 'nm')
setattr(ncvar_positions, "long_name", "initial_positions[atom][spatial] is initial position of coordinate 'spatial' of atom 'atom' used for all simulations.")
x = positions / unit.nanometers
ncfile.variables['initial_positions'][:,:] = x[:,:]
# Store system.
ncvar_system = ncfile.createVariable('system', str, ('singleton',), zlib=True)
setattr(ncvar_system, 'long_name', "system is the serialized OpenMM System used for all simulations")
ncvar_system[0] = system.__getstate__()
# Create storage for simulation data.
ncvar_densities = ncfile.createVariable('reduced_density', 'f4', ('replicate','iteration'), zlib=True, chunksizes=(nreplicates,1))
setattr(ncvar_densities, "long_name", "reduced_density[replicate][iteration] is the density (in reduced, dimensionless units) of iteration 'iteration' of replicate 'replicate'")
ncvar_potential = ncfile.createVariable('reduced_potential', 'f4', ('replicate','iteration'), zlib=True, chunksizes=(1,niterations+1))
setattr(ncvar_potential, "long_name", "reduced_potential[replicate][iteration] is the density (in kT) of iteration 'iteration' of replicate 'replicate'")
ncfile.sync()
return ncfile
| [
"choderaj@mskcc.org"
] | choderaj@mskcc.org |
efd87e3bca2effcd0c4b04504efd8b08335b2d97 | 1abcf3266a234694cd8484f500891de58d45f818 | /admin/adminListFlights.py | ce7a56a6898fd06444d4b5c59dabf6f6f525a6f4 | [] | no_license | vishvanata/rtLog | ca414094d2370132ec5625ae4314f698694a2a4d | 2d0d8984241a8d7c3762c3cd50e0422978347dea | refs/heads/main | 2023-03-05T02:20:48.714374 | 2021-02-02T02:59:05 | 2021-02-02T02:59:05 | 334,457,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # Vishva Natarajan
#List all unmanned vehicle
#returns status code 100 for NO Records Found.
from flask import Flask, json, Response
from flask_jsonpify import jsonify
from flask_restful import Resource,reqparse
from sqlalchemy import create_engine
class adminListFlights(Resource):
def __init__(self,**kwargs):
self.db = kwargs['db'];
self.conn = self.db.raw_connection();
def get(self):
try:
# select all records
cursor = self.conn.cursor();
cursor.callproc('spListFlights');
resultSet = cursor.fetchall();
cursor.close();
self.conn.close();
# parse data which contains multiple rows into the umv records
umvList = [];
for umv in resultSet:
umvDict = {
'id': umv[0],
'slno': umv[1],
'email': umv[2],
'ipaddress': umv[3],
'home_latitude': umv[4],
'home_longitude': umv[5],
'current_latitude': umv[6],
'current_longitude': umv[7],
'description': umv[8],
'createdatetime': umv[9],
'enddatetime': umv[10]
};
umvList.append(umvDict);
return jsonify(data=umvList);
except Exception as e:
return {'error': str(e)}
| [
"noreply@github.com"
] | noreply@github.com |
6080a62944e22b2f2c402fe35787e022157e5b5c | e28c0398d5f4de0efdf3a3c4c0609f193bc8e3aa | /tmweixin/api/decorators.py | 5197516af9ac2acd2b20c3a61a1f8914db78710b | [] | no_license | zhongkunchen/tmweixin | 9f3ef3770f7e966e349a312bdbd22ed52b4ce2de | a54c082b0a893fb096401fa22c267674109e8538 | refs/heads/master | 2021-01-10T01:44:24.823143 | 2015-11-25T09:04:29 | 2015-11-25T09:04:29 | 46,848,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | #!coding: utf-8
__author__ = 'zkchen'
from base import SimpleApi, CacheResultApi
def pull_api(api_url, data_key, api_klass=None, **kwargs):
"""
装饰器,用于把一个类实例方法变成pull型api方法
"""
if api_klass is None:
api_klass = SimpleApi
api = api_klass(api_url=api_url, data_key=data_key, **kwargs)
def wraper(func):
def inner(self):
ret = func(self, api.get_data())
return ret
return inner
return wraper
def pull_cache_api(api_url, data_key, cache_time, cache_key, api_klass=None, **kwargs):
"""
装饰器,用于把一个类实例方法变成带缓存的pull型api方法
"""
if api_klass is None:
api_klass = CacheResultApi
api = api_klass(api_url=api_url, data_key=data_key, cache_key=cache_key, cache_time=cache_time, **kwargs)
def wraper(func):
def inner(self):
ret = func(self, api.get_data())
return ret
return inner
return wraper
| [
"zkchen@zkchendeMacBook-Pro.local"
] | zkchen@zkchendeMacBook-Pro.local |
9dc37b68f4227ac27a89f55d261f0f2d58db064d | 53f2a72afe6b638b2beea94f5514be8ab3921ee4 | /reg/migrations/0006_auto_20170719_1646.py | 4189816eda6d02e0667fc4bc2aa42ba6d00f9c8b | [] | no_license | bipinkh/FSUelectionManagementSystem | 576336f2fbff46e137bd7ff76fe8df17c19fd9a9 | 013ac5f7f7475656876b471a52704d76f1d13d2b | refs/heads/master | 2021-01-01T15:49:00.204030 | 2018-07-07T17:25:56 | 2018-07-07T17:25:56 | 97,708,797 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-19 11:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reg', '0005_auto_20170719_1636'),
]
operations = [
migrations.AlterField(
model_name='student',
name='photo',
field=models.FileField(upload_to=''),
),
]
| [
"bipinkh91@gmail.com"
] | bipinkh91@gmail.com |
2e1a11b5368de0858e255c37da6f4b4233a5b330 | 478e188c5109686fbd1b1cc529842c4a00263077 | /Session5/test8.py | 64c809dee126ce76fdb10fa74533da488207e71d | [] | no_license | apri-me/python_class00 | c48a28dc28f24dbba5def448d195dd6da85dbe6c | 99d97e9abfb1e1ad9b9c22a5a1a0b511872fe02e | refs/heads/master | 2023-05-29T09:03:19.135841 | 2021-06-16T20:59:52 | 2021-06-16T20:59:52 | 342,352,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | my_dict = {
'name': 'alireza',
'last name': 'afroozi',
'face': {
'hair color': 'Brown',
'eye color': 'Blue'
} ,
'friends': ['sina', 'vahid', 'amir', 'erfan'] ,
}
# my_dict['year'] = 1399
# my_dict['class'] = 'A101'
# my_dict['religion'] = 'Islam'
my_dict.update( {'name': 'ali', 'year': 1399, 'class': 'A101', 'religion': 'Islam'} )
print(my_dict) | [
"the.aprime2020@gmail.com"
] | the.aprime2020@gmail.com |
d614fc69982e90bef74703c3499141f522d4c0e0 | 3c619a14c087f889e007c5b7e9415037d4c63992 | /tensorflow/contrib/distribute/python/collective_all_reduce_strategy_test.py | bba0bce9f2e2c4b231c9dc4a2705c476ab091d50 | [
"Apache-2.0"
] | permissive | WangLing20150722/tensorflow | 896b49796b6a4b658c89877704e5fcd86076892e | f1f1336aef572eb2073c9a488f8408b6404a700b | refs/heads/master | 2020-04-20T04:26:36.320082 | 2019-02-01T02:01:25 | 2019-02-01T02:01:34 | 168,627,573 | 1 | 0 | Apache-2.0 | 2019-02-01T02:05:23 | 2019-02-01T02:05:22 | null | UTF-8 | Python | false | false | 20,537 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CollectiveAllReduceStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import training_util
class CollectiveAllReduceStrategyTestBase(
    multi_worker_test_base.MultiWorkerTestBase):
  """Shared harness for CollectiveAllReduceStrategy tests.

  Provides `_get_test_object` to build a configured strategy plus the
  per-test driver methods (`_test_minimize_loss_graph`, `_test_complex_model`,
  `_test_variable_initialization`, `_test_input_fn_iterator`) that concrete
  test classes run between-graph across an in-process cluster created by the
  `MultiWorkerTestBase` machinery.
  """

  # Class-level offset added to every collective group/instance key; bumped
  # in setUp so each test gets a disjoint key range (see comment below).
  collective_key_base = 0

  def setUp(self):
    # We use a different key_base for each test so that collective keys won't be
    # reused.
    # TODO(yuefengz, tucker): enable it to reuse collective keys in different
    # tests.
    CollectiveAllReduceStrategyTestBase.collective_key_base += 100000
    super(CollectiveAllReduceStrategyTestBase, self).setUp()

  def _get_test_object(self, task_type, task_id, num_gpus=0):
    """Creates a strategy configured for one task of the test cluster.

    Args:
      task_type: cluster role (e.g. 'worker'); falsy for a local (in-process)
        strategy that skips cluster configuration.
      task_id: index of the task within `task_type`; `None` also selects the
        local path.
      num_gpus: number of GPUs per worker to request from the strategy.

    Returns:
      A `(strategy, master_target, session_config)` tuple where
      `master_target` is the 'grpc://...' address of the chosen task, or ''
      when no cluster configuration was applied.
    """
    distribution = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
        num_gpus_per_worker=num_gpus)
    session_config = config_pb2.ConfigProto()
    if task_type and task_id is not None:
      distribution.configure(
          session_config=session_config,
          cluster_spec=self._cluster_spec,
          task_type=task_type,
          task_id=task_id)
    # Derive key starts from both num_gpus and the per-test base so that
    # different (test, num_gpus) combinations never share collective keys.
    collective_keys = cross_device_utils.CollectiveKeys(
        group_key_start=10 * num_gpus +
        CollectiveAllReduceStrategyTestBase.collective_key_base,
        instance_key_start=num_gpus * 100 +
        CollectiveAllReduceStrategyTestBase.collective_key_base,
        instance_key_with_id_start=num_gpus * 10000 +
        CollectiveAllReduceStrategyTestBase.collective_key_base)
    # NOTE(review): reaches into private strategy internals to install the
    # test-scoped keys on both the extended strategy and its cross-device ops.
    distribution.extended._collective_keys = collective_keys
    distribution.extended._cross_device_ops._collective_keys = (
        collective_keys)
    if task_type and task_id is not None:
      return distribution, 'grpc://' + self._cluster_spec[task_type][
          task_id], session_config
    else:
      return distribution, '', session_config

  def _test_minimize_loss_graph(self, task_type, task_id, num_gpus):
    """Runs 10 SGD-style steps on a 1-weight model; asserts error decreases."""
    d, master_target, config = self._get_test_object(task_type, task_id,
                                                     num_gpus)
    with ops.Graph().as_default(), \
         self.cached_session(config=config,
                             target=master_target) as sess, \
         d.scope():
      l = core.Dense(1, use_bias=False,
                     name='gpu_%d' % d.extended._num_gpus_per_worker)

      def loss_fn(x):
        # Squared distance of the dense layer's scalar output from 1.
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      # TODO(yuefengz, apassos): eager.backprop.implicit_grad is not safe for
      # multiple graphs (b/111216820).
      def grad_fn(x):
        loss = loss_fn(x)
        var_list = (
            variables.trainable_variables() + ops.get_collection(
                ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
        grads = gradients.gradients(loss, var_list)
        ret = list(zip(grads, var_list))
        return ret

      def update(v, g):
        # Plain gradient-descent update with a fixed 0.05 learning rate.
        return v.assign_sub(0.05 * g, use_locking=True)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.extended.call_for_each_replica(grad_fn, args=[one])
        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.extended.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            # TODO(yuefengz): support non-Mirrored variable as destinations.
            g = d.extended.reduce_to(
                reduce_util.ReduceOp.SUM, g, destinations=v)
            with ops.control_dependencies(
                d.extended.update(v, update, args=(g,), group=False)):
              after_list.append(d.extended.read_var(v))
        return before_list, after_list

      before_out, after_out = step()

      # Skip execution (vacuously pass) when the host lacks the GPUs the
      # strategy was configured with.
      if context.num_gpus() < d.extended._num_gpus_per_worker:
        return True

      sess.run(variables.global_variables_initializer())

      for i in range(10):
        b, a = sess.run((before_out, after_out))
        if i == 0:
          before, = b
        after, = a

      # Compare the first pre-update value with the last post-update value.
      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)
      return error_after < error_before

  def _test_complex_model(self, task_type, task_id, num_gpus):
    """Builds and runs one training step of a small conv net on random data."""
    d, master_target, config = self._get_test_object(task_type, task_id,
                                                     num_gpus)

    def model_fn():
      """Mnist model with synthetic input."""
      data_format = 'channels_last'
      input_shape = [28, 28, 1]
      l = keras.layers
      max_pool = l.MaxPooling2D((2, 2), (2, 2),
                                padding='same',
                                data_format=data_format)
      model = keras.Sequential([
          l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
          l.Conv2D(
              32,
              5,
              padding='same',
              data_format=data_format,
              activation=nn.relu), max_pool,
          l.Conv2D(
              64,
              5,
              padding='same',
              data_format=data_format,
              activation=nn.relu), max_pool,
          l.Flatten(),
          l.Dense(1024, activation=nn.relu),
          l.Dropout(0.4),
          l.Dense(10)
      ])
      # Synthetic batch of 2 images and integer labels in [0, 10).
      image = random_ops.random_uniform([2, 28, 28])
      label = random_ops.random_uniform([2, 1], maxval=10, dtype=dtypes.int32)
      logits = model(image, training=True)
      # TODO(yuefengz): make loss a callable for eager mode.
      loss = losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
      optimizer = adam.AdamOptimizer(learning_rate=1e-4)
      train_op = optimizer.minimize(loss,
                                    training_util.get_or_create_global_step())
      return train_op

    with ops.Graph().as_default(), \
         self.cached_session(config=config,
                             target=master_target) as sess:
      with d.scope():
        train_op = d.extended.call_for_each_replica(model_fn)
        train_op = d.group(d.unwrap(train_op))

      sess.run(variables.global_variables_initializer())
      sess.run(train_op)
      return True

  def _test_variable_initialization(self, task_type, task_id, num_gpus):
    """Checks per-replica random variables initialize to one shared value.

    Each replica creates variable 'x' with a random initializer; with the
    collective strategy the values are verified (via allclose against the
    cross-replica MEAN) to agree across replicas.
    """
    distribution, master_target, config = self._get_test_object(
        task_type, task_id, num_gpus)
    with ops.Graph().as_default(), \
         self.cached_session(config=config,
                             target=master_target) as sess, \
         distribution.scope():

      def model_fn():
        x = variable_scope.get_variable(
            'x',
            shape=(2, 3),
            initializer=init_ops.random_uniform_initializer(
                1.0, 10.0, dtype=dtypes.float32))
        return array_ops.identity(x)

      x = distribution.extended.call_for_each_replica(model_fn)
      reduced_x = distribution.reduce(reduce_util.ReduceOp.MEAN, x)
      # Compare one replica's value against the mean over all replicas; they
      # match only if every replica holds the same initialized value.
      x = distribution.unwrap(x)[0]

      sess.run(variables.global_variables_initializer())

      x_value, reduced_x_value = sess.run([x, reduced_x])
      self.assertTrue(
          np.allclose(x_value, reduced_x_value, atol=1e-5),
          msg=('x_value = %r, reduced_x_value = %r' % (x_value,
                                                       reduced_x_value)))
    return np.allclose(x_value, reduced_x_value, atol=1e-5)

  def _test_input_fn_iterator(self, task_type, task_id, num_gpus, input_fn,
                              expected_values, test_reinitialize=True):
    """Drives an input-fn iterator and checks per-replica values.

    Iterates until exhaustion, asserts each batch of per-replica elements
    equals the corresponding entry of `expected_values`, verifies
    OutOfRangeError at the end, and (when `test_reinitialize`) checks the
    iterator can be re-initialized and replayed.
    """
    distribution, master_target, config = self._get_test_object(
        task_type, task_id, num_gpus)
    devices = distribution.extended.worker_devices
    with ops.Graph().as_default(), \
         self.cached_session(config=config,
                             target=master_target) as sess:
      iterator = distribution.make_input_fn_iterator(input_fn)
      sess.run(iterator.initialize())
      for expected_value in expected_values:
        next_element = iterator.get_next()
        computed_value = sess.run([values.select_replica(r, next_element)
                                   for r in range(len(devices))])
        self.assertEqual(expected_value, computed_value)

      # One more get_next() past the end must raise OutOfRangeError.
      with self.assertRaises(errors.OutOfRangeError):
        next_element = iterator.get_next()
        sess.run([values.select_replica(r, next_element)
                  for r in range(len(devices))])

      # After re-initializing the iterator, should be able to iterate again.
      if test_reinitialize:
        sess.run(iterator.initialize())
        for expected_value in expected_values:
          next_element = iterator.get_next()
          computed_value = sess.run([values.select_replica(r, next_element)
                                     for r in range(len(devices))])
          self.assertEqual(expected_value, computed_value)
class DistributedCollectiveAllReduceStrategyTest(
    CollectiveAllReduceStrategyTestBase,
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """Multi-worker (no chief) tests for CollectiveAllReduceStrategy.

  Runs the drivers from `CollectiveAllReduceStrategyTestBase` between-graph
  against a 3-worker in-process cluster created once per class.
  """

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 3 workers."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=3, num_ps=0)

  def test_num_replicas_in_sync(self):
    # With 2 GPUs per worker, replicas-in-sync = 2 * (chief + worker count).
    distribution = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
        num_gpus_per_worker=2)
    distribution.configure(cluster_spec=self._cluster_spec, task_type='worker',
                           task_id=0)
    num_workers = len(self._cluster_spec.get('chief', []) +
                      self._cluster_spec.get('worker', []))
    self.assertEqual(2 * num_workers,
                     distribution.num_replicas_in_sync)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testMinimizeLossGraph(self, num_gpus):
    self._run_between_graph_clients(self._test_minimize_loss_graph,
                                    self._cluster_spec, num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testVariableInitialization(self, num_gpus):
    if context.num_gpus() < num_gpus:
      self.skipTest('Not enough GPUs')
    self._run_between_graph_clients(
        self._test_variable_initialization,
        self._cluster_spec,
        num_gpus=num_gpus)

  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
  def testComplexModel(self, num_gpus):
    if context.num_gpus() < num_gpus:
      self.skipTest('Not enough GPUs')
    self._run_between_graph_clients(
        self._test_complex_model, self._cluster_spec, num_gpus=num_gpus)

  # TODO(yuefengz): Update how we use num_gpus and required_gpus
  @combinations.generate(
      combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1,
                           use_dataset=[True, False]))
  def testMakeInputFnIterator(self, num_gpus, use_dataset):
    if context.num_gpus() < num_gpus:
      self.skipTest('Not enough GPUs')
    if use_dataset:
      fn = lambda: dataset_ops.Dataset.range(100)
    else:
      # Non-dataset path: input fn returns a callable producing elements.
      def fn():
        dataset = dataset_ops.Dataset.range(100)
        it = dataset.make_one_shot_iterator()
        return it.get_next
    # We use CPU as the device when num_gpus = 0
    devices_per_worker = max(1, num_gpus)
    # Each step consumes one consecutive value per local device.
    expected_values = [[i+j for j in range(devices_per_worker)]
                       for i in range(0, 100, devices_per_worker)]

    input_fn = self._input_fn_to_test_input_context(
        fn,
        expected_num_replicas_in_sync=3*devices_per_worker,
        expected_num_input_pipelines=3,
        expected_input_pipeline_id=1)  # because task_id = 1
    self._test_input_fn_iterator('worker', 1, num_gpus,
                                 input_fn, expected_values,
                                 test_reinitialize=use_dataset)

  def testUpdateConfigProto(self):
    """Verifies update_config_proto sets leader, filters, and rewrites."""
    distribution = collective_all_reduce_strategy.CollectiveAllReduceStrategy(
        num_gpus_per_worker=2)
    distribution.configure(
        cluster_spec=self._cluster_spec, task_type='worker', task_id=1)

    # Seed the proto with values the strategy is expected to override.
    config_proto = config_pb2.ConfigProto(device_filters=['to_be_overridden'])
    rewrite_options = config_proto.graph_options.rewrite_options
    rewrite_options.scoped_allocator_opts.enable_op.append('to_be_removed')

    new_config = distribution.update_config_proto(config_proto)

    # Verify group leader
    self.assertEqual('/job:worker/replica:0/task:0',
                     new_config.experimental.collective_group_leader)

    # Verify device filters.
    self.assertEqual(['/job:worker/task:1'], new_config.device_filters)

    # Verify rewrite options.
    new_rewrite_options = new_config.graph_options.rewrite_options
    self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
                     new_rewrite_options.scoped_allocator_optimization)
    self.assertEqual(['CollectiveReduce'],
                     new_rewrite_options.scoped_allocator_opts.enable_op)
class DistributedCollectiveAllReduceStrategyTestWithChief(
        CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
    # Same multi-worker tests, but the in-process cluster also has a chief
    # task in addition to the 3 workers.

    @classmethod
    def setUpClass(cls):
        """Create a local cluster with 3 workers and 1 chief."""
        cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
            num_workers=3, num_ps=0, has_chief=True)

    @combinations.generate(
        combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
    def testMinimizeLossGraph(self, num_gpus):
        self._run_between_graph_clients(self._test_minimize_loss_graph,
                                        self._cluster_spec, num_gpus)

    @combinations.generate(
        combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
    def testVariableInitialization(self, num_gpus):
        # Fix: use skipTest() instead of a bare `return` so that missing GPUs
        # are reported as a skip, not a silent pass (matches the sibling
        # DistributedCollectiveAllReduceStrategyTest class).
        if context.num_gpus() < num_gpus:
            self.skipTest('Not enough GPUs')
        self._run_between_graph_clients(
            self._test_variable_initialization,
            self._cluster_spec,
            num_gpus=num_gpus)

    @combinations.generate(
        combinations.combine(mode=['graph'], num_gpus=[0, 1, 2], required_gpus=1))
    def testComplexModel(self, num_gpus):
        # Fix: skipTest() instead of bare `return` (see above).
        if context.num_gpus() < num_gpus:
            self.skipTest('Not enough GPUs')
        self._run_between_graph_clients(
            self._test_complex_model, self._cluster_spec, num_gpus=num_gpus)
class LocalCollectiveAllReduceStrategy(
        CollectiveAllReduceStrategyTestBase,
        strategy_test_lib.DistributionTestBase,
        strategy_test_lib.TwoDeviceDistributionTestBase,
        parameterized.TestCase):
    # Single-worker (local, multi-GPU) tests for CollectiveAllReduceStrategy.

    @combinations.generate(
        combinations.combine(
            mode=['graph', 'eager'], num_gpus=[2, 4], required_gpus=2))
    def testMinimizeLoss(self, num_gpus):
        # Collective ops doesn't support strategy with one device.
        if context.num_gpus() < num_gpus:
            self.skipTest('Not enough GPUs')
        if context.executing_eagerly():
            strategy, _, _ = self._get_test_object(None, None, num_gpus)
            self._test_minimize_loss_eager(strategy)
        else:
            self._test_minimize_loss_graph(None, None, num_gpus)

    @combinations.generate(
        combinations.combine(mode=['graph'], num_gpus=[2, 4], required_gpus=2))
    def testComplexModel(self, num_gpus):
        if context.num_gpus() < num_gpus:
            self.skipTest('Not enough GPUs')
        self._test_complex_model(None, None, num_gpus)

    @combinations.generate(
        combinations.combine(mode=['graph', 'eager'], required_gpus=2,
                             use_dataset=[True, False]))
    def testMakeInputFnIterator(self, use_dataset):
        num_gpus = 2
        if use_dataset:
            fn = lambda: dataset_ops.Dataset.range(5 * num_gpus)
        else:
            def fn():
                dataset = dataset_ops.Dataset.range(5 * num_gpus)
                it = dataset.make_one_shot_iterator()
                return it.get_next
        expected_values = [range(i, i + num_gpus) for i in range(0, 10, num_gpus)]

        input_fn = self._input_fn_to_test_input_context(
            fn,
            expected_num_replicas_in_sync=num_gpus,
            expected_num_input_pipelines=1,
            expected_input_pipeline_id=0)
        self._test_input_fn_iterator(None, None, num_gpus,
                                     input_fn, expected_values,
                                     test_reinitialize=use_dataset)

    def _run_all_reduce_test(self, tester):
        # Shared driver for the all-reduce tests below (removes the
        # skip / build-strategy / session boilerplate duplicated six times):
        # skip without 2 GPUs, build a local 2-GPU strategy, run `tester`
        # inside a session against it.
        if context.num_gpus() < 2:
            self.skipTest('Not enough GPUs')
        distribution, target, config = self._get_test_object(None, None, num_gpus=2)
        with self.cached_session(config=config, target=target):
            tester(distribution)

    def testAllReduceSum(self):
        self._run_all_reduce_test(self._test_all_reduce_sum)

    def testAllReduceSumGradients(self):
        self._run_all_reduce_test(self._test_all_reduce_sum_gradients)

    def testAllReduceSumGradientTape(self):
        self._run_all_reduce_test(self._test_all_reduce_sum_gradient_tape)

    def testAllReduceMean(self):
        self._run_all_reduce_test(self._test_all_reduce_mean)

    def testAllReduceMeanGradients(self):
        self._run_all_reduce_test(self._test_all_reduce_mean_gradients)

    def testAllReduceMeanGradientTape(self):
        self._run_all_reduce_test(self._test_all_reduce_mean_gradient_tape)

    def testNumpyIterator(self):
        num_gpus = 2
        if context.num_gpus() < num_gpus:
            self.skipTest('Not enough GPUs')
        strategy, _, _ = self._get_test_object(None, None, num_gpus)
        self._test_numpy_iterator(strategy)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8a694374ab5ab30d8a2be86fa9ae8f05d8ed4afe | d64f4e4c2ba479a8339b3517183ac1086c3a4fd7 | /venv/Scripts/django-admin.py | 41558f663821ce43a846c1fd5baaab8a06be93cd | [
"MIT"
] | permissive | cforcross/databases_medium | a56b76323bccd73d48d42b76776a8e3fffd02211 | 7f9ae1e462b7a02db7d303a8a353598e3b5aceed | refs/heads/main | 2023-03-31T10:41:39.892490 | 2021-04-05T10:50:59 | 2021-04-05T10:50:59 | 354,692,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #!d:\medium\databases\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # On Django >= 4.0 the warning class no longer exists; turn that into an
    # actionable error telling the user to delete this legacy script.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    # Warn about the deprecated entry point, then delegate to the real CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"ntsendifor@gmail.com"
] | ntsendifor@gmail.com |
f7fef6c0fbd648e0907bd9109c9e8d61b8d6b5ea | 42a43ce3e25d69efcedc17001d113f7f592191f3 | /src/normal_cells_conb/lstm_cn_sep.py | 812d2a36b75e449412832a0833465d7253091e8b | [
"Apache-2.0"
] | permissive | Jakexxh/lstm_normalizations | 99744d38753f375c2db24b7b99fa7261ebcae1aa | a39c2798550b56a81882b718c73da121435b4322 | refs/heads/master | 2021-03-27T16:05:49.187131 | 2019-02-03T07:46:08 | 2019-02-03T07:46:08 | 109,641,676 | 1 | 0 | null | 2017-11-17T06:39:14 | 2017-11-06T03:11:46 | Python | UTF-8 | Python | false | false | 6,199 | py | import tensorflow as tf
import numpy as np
# import time
from tensorflow.python.ops.rnn_cell import RNNCell, LSTMStateTuple
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.platform import tf_logging as logging
from .__init__ import *
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
class CNLSTMCell(RNNCell):
def __init__(self,
num_units,
grain,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None):
super(CNLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._grain = grain
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
sigmoid = math_ops.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = self._line_sep([inputs, h], 4 * self._num_units, bias=False)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(
value=concat, num_or_size_splits=4, axis=1)
new_c = (c * sigmoid(f + self._forget_bias) +
sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
def _line_sep(self,
args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to \
be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
[x, h] = args
input = tf.concat([x, h], 1)
x_size = input.get_shape().as_list()[1]
W_xh = tf.get_variable(
'W_xh', [x_size, output_size], initializer=weights_initializer
)
# x_size = x.get_shape().as_list()[1]
# W_xh = tf.get_variable(
# 'W_xh', [x_size, output_size], initializer=weights_initializer
# )
# W_hh = tf.get_variable(
# 'W_hh', [int(output_size / 4), output_size], initializer= weights_initializer
# )
# x = tf.Print(x,[tf.reduce_mean(x)], str(scope)+'x: ')
# h = tf.Print(h,[tf.reduce_mean(h)], str(scope)+'h: ')
# W_xh = tf.Print(W_xh,[tf.reduce_mean(W_xh)], str(scope)+'W_xh: ')
# W_hh = tf.Print(W_hh,[tf.reduce_mean(W_hh)], str(scope)+'W_hh: ')
cn_xh = self.cosine_norm(input, W_xh, 'cn_xh') # one hot vector
# cn_hh = self.cosine_norm(h, W_hh, 'cn_hh')
# cn_xh = tf.Print(cn_xh,[tf.reduce_mean(cn_xh)], str(scope)+'cn_xh: ')
# cn_hh = tf.Print(cn_hh,[tf.reduce_mean(cn_hh)], str(scope)+'cn_hh: ')
res = cn_xh
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(
0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
def cosine_norm(self, x, w, name='cosine_norm'):
with tf.name_scope(name):
x = tf.concat([x, tf.fill([tf.shape(x)[0], 1], 1e-7)], axis=1)
w = tf.concat([w, tf.fill([1, tf.shape(w)[1]], 1e-7)], axis=0)
if tf.equal(tf.shape(x)[1], tf.shape(w)[0]) is not None:
x_l2 = tf.nn.l2_normalize(x, 1)
w_l2 = tf.nn.l2_normalize(w, 0)
cos_mat = tf.matmul(x_l2, w_l2)
gamma = tf.get_variable(
name + '_gamma', [cos_mat.get_shape().as_list()[1]],
initializer=tf.truncated_normal_initializer(
self._grain))
beta = tf.get_variable(
name + '_beta', [cos_mat.get_shape().as_list()[1]],
initializer=tf.zeros_initializer)
return gamma * cos_mat + beta
else:
raise Exception(
'Matrix shape does not match in cosine_norm Operation!')
def identity_initializer_xh(scale):
    """Build an initializer returning a scaled identity matrix.

    NOTE(review): the produced tensor is always square, shape[0] x shape[0],
    regardless of the requested shape's second dimension -- presumably this is
    only ever used with square shapes; confirm at the call site.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        side = shape[0]
        return tf.constant(np.identity(side) * scale, dtype)
    return _initializer
def identity_initializer_hh(scale):
    """Build an initializer for a (size x 4*size) recurrent weight matrix.

    Each of the four gate sub-blocks of columns -- [0:size], [size:2*size],
    [2*size:3*size], [3*size:4*size] -- is filled with a scaled identity.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        size = shape[0]
        eye = np.identity(size) * scale
        t = np.zeros(shape)
        for gate in range(4):
            t[:, gate * size:(gate + 1) * size] = eye
        return tf.constant(t, dtype)
    return _initializer
| [
"jakewill@qq.com"
] | jakewill@qq.com |
4649092af7c99f1f913f1b6305e81c3ad84e7b26 | 2b4af8810511b5f1ed47fdf5662753b9b4af76b8 | /corehq/apps/case_search/migrations/0004_auto_20170518_2018.py | 90b4ceefdfb27e1e0f3dd0893ad81a5dd1d8a782 | [] | no_license | DeckOfPandas/commcare-wddcp | 55bde89197ec5bc4a4b53d327ec6a811aec0d752 | 810d2e09d3890e3d0d70178745da5924c1db767b | refs/heads/dimagi | 2020-12-02T19:19:53.992796 | 2017-06-30T15:18:16 | 2017-07-05T12:23:26 | 96,325,707 | 1 | 0 | null | 2017-07-05T14:02:49 | 2017-07-05T14:02:49 | null | UTF-8 | Python | false | false | 1,843 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-18 20:18
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('case_search', '0003_casesearchqueryaddition'),
]
operations = [
migrations.CreateModel(
name='FuzzyProperties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(db_index=True, max_length=256)),
('case_type', models.CharField(db_index=True, max_length=256)),
('properties', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True, null=True), null=True, size=None)),
],
),
migrations.AlterField(
model_name='casesearchqueryaddition',
name='query_addition',
field=jsonfield.fields.JSONField(default=dict, help_text=b"More information about how this field is used can be found <a href='https://docs.google.com/document/d/1MKllkHZ6JlxhfqZLZKWAnfmlA3oUqCLOc7iKzxFTzdY/edit#heading=h.k5pky76mwwon'>here</a>. This ES <a href='https://www.elastic.co/guide/en/elasticsearch/guide/1.x/bool-query.html'>documentation</a> may also be useful. This JSON will be merged at the `query.filtered.query` path of the query JSON."),
),
migrations.AlterUniqueTogether(
name='fuzzyproperties',
unique_together=set([('domain', 'case_type')]),
),
migrations.AddField(
model_name='casesearchconfig',
name='fuzzy_properties',
field=models.ManyToManyField(to='case_search.FuzzyProperties'),
),
]
| [
"proteusvacuum@gmail.com"
] | proteusvacuum@gmail.com |
08ccdac741d436ab5e27c2fc379b6c2255343efc | 73515ef9cb8d5217bbbf85da11eaf945f303a88d | /venv/Scripts/pip3-script.py | 41921217fe67a1f7f3bf305f585fd84dd1b4ded5 | [] | no_license | zarruk/my-first-blog | 882dc3810a01dd47def5f992507e35001e3d49c1 | 77877fc630801564c5be3de85f28338ad669b36a | refs/heads/master | 2020-04-17T16:12:29.862254 | 2019-01-21T01:41:15 | 2019-01-21T01:41:15 | 166,731,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!C:\Users\szarruk\djangogirls\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper for the `pip3` command.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip any "-script.py"/".pyw"/".exe" suffix from argv[0] so pip sees a
    # clean program name, then run the 'pip3' console-script entry point and
    # propagate its return value as the process exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"szarruk@hotmail.com"
] | szarruk@hotmail.com |
0b89c273933090f2847d0f904b8a5b5753190783 | b79c94aac2368fcfe5ec1d4ead7f6a7084d1fe19 | /script/nrg/process_routes.py | f6fd77956ae0c72300fb3000bfcc6fc00bebd9fd | [] | no_license | jonstjohn/Climb-Spotter | 2f1eeae897e5420dd15fd7848c3ccb0c3c15484a | 5eec750f66a63a541099c526bb2f9d675098e599 | refs/heads/master | 2020-03-27T03:40:53.002382 | 2014-02-09T04:59:30 | 2014-02-09T04:59:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | import csv, sys
sys.path.append('/home/jonstjohn/climbspotter/dev/code')
from model import Area, Route
from dbModel import DbArea
import db
f = open('/home/jonstjohn/climbspotter/dev/data/route.txt', 'r')
import re
area_lookup = {}
route_pages = {}
max_page = 1
session = db.csdb.session
# Parse file
p = re.compile('(.*)\.\.+([0-9]*)')
for line in f:
#print line,
m = p.match(line)
name = m.group(1).strip()
name = name.rstrip('.')
page = m.group(2).strip()
page = int(page.lstrip('.'))
if page > max_page:
max_page = page
if not page in route_pages:
route_pages[page] = []
areas = Area.search(name, exact = True)
if len(areas) == 1:
area_lookup[page] = (areas[0]['area_id'], name)
elif len(areas) > 1:
print(" **** {0} areas found for '{1}' ****".format(len(areas), name))
else:
route_pages[page].append(name)
#print(" {0}-{1}".format(name, page))
area_id = None
for page in range(1,max_page+1):
print("Page {0}".format(page))
if page in area_lookup:
print (" AREA - {0} ({1})".format(area_lookup[page][1], area_lookup[page][0]))
area_id = area_lookup[page][0]
if page in route_pages:
for route in route_pages[page]:
print(" {0} ({1})".format(route, area_id))
r = Route.Route()
r.name = route
r.area_id = area_id
r.save()
sys.exit(1)
# Loop over all our areas, try to find in file, start with shortest path length
for area in session.query(DbArea).order_by(DbArea.area_id):
print(area.name)
if area.name in routes:
print("-".join(routes[area.name]))
else:
print(" ***** NOT FOUND ****** ")
# areas = Area.search(name, exact = True)
# if len(areas) != 0:
# area_lookup[areas[0]['area_id']] = page
# print(" ** Appears to be area **")
# if len(areas) > 1:
# print(" XXXXX MORE THAN ONE MATCH XXXX")
print("AREAS")
for name, page in area_lookup.iteritems():
print("{0} - {1}".format(name, page))
| [
"jonstjohn@gmail.com"
] | jonstjohn@gmail.com |
e76376d0d332a1f8cd16367b148f06b9022ba13b | d65128e38be0243f279e0d72ef85e7d3c5e116ca | /base/site-packages/django/db/models/sql/compiler.py | 2a5abdfa217d68efa5ee1eeb553a13fc89e89164 | [
"Apache-2.0"
] | permissive | ZxwZero/fastor | 19bfc568f9a68f1447c2e049428330ade02d451d | dd9e299e250362802032d1984801bed249e36d8d | refs/heads/master | 2021-06-26T06:40:38.555211 | 2021-06-09T02:05:38 | 2021-06-09T02:05:38 | 229,753,500 | 1 | 1 | Apache-2.0 | 2019-12-23T12:59:25 | 2019-12-23T12:59:24 | null | UTF-8 | Python | false | false | 50,329 | py | import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
        """
        # Make sure at least the base table participates in the query.
        if not self.query.tables:
            self.query.join((None, self.query.get_meta().db_table, None))
        # Pull in parent-model columns when the default column set is used.
        if (not self.query.select and self.query.default_cols and not
                self.query.included_inherited_models):
            self.query.setup_inherited_models()
        # Populate related_select_cols for select_related() queries.
        if self.query.select_related and not self.query.related_select_cols:
            self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # A slice like qs[5:5] can never return rows; short-circuit with an
        # empty query.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()

        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.refcounts_before = self.query.alias_refcount.copy()
        out_cols, s_params = self.get_columns(with_col_aliases)
        ordering, o_params, ordering_group_by = self.get_ordering()

        distinct_fields = self.get_distinct()

        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()

        qn = self.quote_name_unless_alias

        where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
        having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
        having_group_by = self.query.having.get_cols()
        params = []
        for val in six.itervalues(self.query.extra_select):
            params.extend(val[1])

        result = ['SELECT']

        if self.query.distinct:
            result.append(self.connection.ops.distinct_sql(distinct_fields))

        # Parameter order must mirror clause order in the final SQL string.
        params.extend(o_params)
        result.append(', '.join(out_cols + self.ordering_aliases))
        params.extend(s_params)
        params.extend(self.ordering_params)

        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)

        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)

        grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if not ordering:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)

        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)

        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))

        if with_limits:
            if self.query.high_mark is not None:
                result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
            if self.query.low_mark:
                if self.query.high_mark is None:
                    # OFFSET without LIMIT needs a backend-specific "no limit"
                    # value on some databases.
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.query.low_mark)

        if self.query.select_for_update and self.connection.features.has_select_for_update:
            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError('NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))

        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(self.refcounts_before)

        return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
    def get_columns(self, with_aliases=False):
        """
        Returns the list of columns to use in the select statement, as well as
        a list any extra parameters that need to be included. If no columns
        have been specified, returns all columns relating to fields in the
        model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        # extra(select=...) entries come first.
        result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
        params = []
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            for col, _ in self.query.select:
                if isinstance(col, (list, tuple)):
                    # (alias, column) pair referring to a real table column.
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    # Honor deferred loading: skip columns not requested.
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = '%s.%s' % (qn(alias), qn(column))
                    if with_aliases:
                        if col[1] in col_aliases:
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # An expression object that knows how to render itself.
                    col_sql, col_params = col.as_sql(qn, self.connection)
                    result.append(col_sql)
                    params.extend(col_params)

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        max_name_length = self.connection.ops.max_name_length()
        for alias, aggregate in self.query.aggregate_select.items():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            if alias is None:
                result.append(agg_sql)
            else:
                result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
            params.extend(agg_params)

        # Columns pulled in by select_related().
        for (table, col), _ in self.query.related_select_cols:
            r = '%s.%s' % (qn(table), qn(col))
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        self._select_aliases = aliases
        return result, params
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False, from_parent=None):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        only_load = self.deferred_to_columns()
        if not start_alias:
            start_alias = self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field, model in opts.get_concrete_fields_with_model():
            if from_parent and model is not None and issubclass(from_parent, model):
                # Avoid loading data for already loaded parents.
                continue
            alias = self.query.join_parent_model(opts, model, start_alias,
                                                 seen_models)
            table = self.query.alias_map[alias].table_name
            # Honor deferred loading: skip columns not requested for this table.
            if table in only_load and field.column not in only_load[table]:
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                    qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
cols, alias = self._final_join_removal(cols, alias)
for col in cols:
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.

        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.

        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().

        Return value is (result, params, group_by). Note that ``params`` is
        never populated in this method (nothing is appended to it here); it
        is returned for interface symmetry with the other SQL builders.
        """
        # Precedence: extra(order_by=...) > explicit order_by() > Meta.ordering.
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = (self.query.order_by
                        or self.query.get_meta().ordering
                        or [])
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        distinct = self.query.distinct
        select_aliases = self._select_aliases
        result = []
        group_by = []
        ordering_aliases = []
        # standard_ordering=False flips ASC/DESC (used by reverse()).
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            asc, desc = ORDER_DIR['DESC']
        # It's possible, due to model inheritance, that normal usage might try
        # to include the same field more than once in the ordering. We track
        # the table/column pairs we use and discard any after the first use.
        processed_pairs = set()
        params = []
        ordering_params = []
        for pos, field in enumerate(ordering):
            if field == '?':
                # Random ordering ("?" in order_by()).
                result.append(self.connection.ops.random_function_sql())
                continue
            if isinstance(field, int):
                # Ordering by select-column index; a negative index means DESC.
                if field < 0:
                    order = desc
                    field = -field
                else:
                    order = asc
                result.append('%s %s' % (field, order))
                group_by.append((str(field), []))
                continue
            col, order = get_order_dir(field, asc)
            if col in self.query.aggregate_select:
                # Ordering by an aggregate alias: quote it and move on.
                result.append('%s %s' % (qn(col), order))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), col)
                    processed_pairs.add((table, col))
                    if not distinct or elt in select_aliases:
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            elif get_order_dir(field)[0] not in self.query.extra:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                for table, cols, order in self.find_ordering_name(field,
                        self.query.get_meta(), default_order=asc):
                    for col in cols:
                        if (table, col) not in processed_pairs:
                            elt = '%s.%s' % (qn(table), qn2(col))
                            processed_pairs.add((table, col))
                            # With DISTINCT, any ordering column not already
                            # selected must be added to the select list.
                            if distinct and elt not in select_aliases:
                                ordering_aliases.append(elt)
                            result.append('%s %s' % (elt, order))
                            group_by.append((elt, []))
            else:
                # Ordering by an extra(select=...) expression.
                elt = qn2(col)
                if col not in self.query.extra_select:
                    # Expression not in the select list: inline it as an alias.
                    sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
                    ordering_aliases.append(sql)
                    ordering_params.extend(self.query.extra[col][1])
                else:
                    if distinct and col not in select_aliases:
                        ordering_aliases.append(elt)
                        ordering_params.extend(params)
                result.append('%s %s' % (elt, order))
                group_by.append(self.query.extra[col])
        self.ordering_aliases = ordering_aliases
        self.ordering_params = ordering_params
        return result, params, group_by
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.

        Returns a list of (alias, columns, order) triples. 'already_seen'
        carries the set of join chains visited so far to detect cyclic
        Meta.ordering definitions.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)
            # Recurse into the related model's Meta.ordering, threading the
            # current direction and seen-set through.
            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results
        # Trim trailing joins that are not needed for the final columns.
        cols, alias = self._final_join_removal(cols, alias)
        return [(alias, cols, order)]
    def _setup_joins(self, pieces, opts, alias):
        """
        A helper method for get_ordering and get_distinct. This method will
        call query.setup_joins, handle refcounts and then promote the joins.

        Note that get_ordering and get_distinct must produce same target
        columns on same input, as the prefixes of get_ordering and get_distinct
        must match. Executing SQL where this is not true is an error.

        Returns (field, cols, alias, joins, opts).
        """
        if not alias:
            alias = self.query.get_initial_alias()
        field, targets, opts, joins, _ = self.query.setup_joins(
            pieces, opts, alias)
        # We will later on need to promote those joins that were added to the
        # query afresh above.
        joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
        # The final join in the chain holds the columns we are after.
        alias = joins[-1]
        cols = [target.column for target in targets]
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.query.ref_alias(alias)
        # Must use left outer joins for nullable fields and their relations.
        # Ordering or distinct must not affect the returned set, and INNER
        # JOINS for nullable fields could do this.
        self.query.promote_joins(joins_to_promote)
        return field, cols, alias, joins, opts
def _final_join_removal(self, cols, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
if set(cols) != set(rhs_cols):
break
cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
self.query.unref_alias(alias)
alias = join.lhs_alias
return cols, alias
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list any extra parameters that
        need to be included. Sub-classes, can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns,
        ordering and distinct must be done first.
        """
        result = []
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        first = True
        from_params = []
        for alias in self.query.tables:
            # Skip aliases no longer referenced by the query.
            if not self.query.alias_refcount[alias]:
                continue
            try:
                name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            alias_str = '' if alias == name else (' %s' % alias)
            if join_type and not first:
                # Render an explicit JOIN ... ON (...) clause, including any
                # extra restriction the join field contributes.
                extra_cond = join_field.get_extra_restriction(
                    self.query.where_class, alias, lhs)
                if extra_cond:
                    extra_sql, extra_params = extra_cond.as_sql(
                        qn, self.connection)
                    extra_sql = 'AND (%s)' % extra_sql
                    from_params.extend(extra_params)
                else:
                    extra_sql = ""
                result.append('%s %s%s ON ('
                        % (join_type, qn(name), alias_str))
                for index, (lhs_col, rhs_col) in enumerate(join_cols):
                    if index != 0:
                        result.append(' AND ')
                    result.append('%s.%s = %s.%s' %
                    (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
                result.append('%s)' % extra_sql)
            else:
                connector = '' if first else ', '
                #result.append('%s%s%s' % (connector, qn(name), alias_str))
                # Local customization (deviates from stock Django, see the
                # commented-out original above): append an index hint for
                # tables registered in query.hints.
                # NOTE(review): FORCE INDEX is MySQL-specific syntax — confirm
                # this code path is only reached on the MySQL backend.
                part = '%s%s%s' % (connector, qn(name), alias_str)
                for model, hint in self.query.hints.items():
                    if model._meta.db_table == name:
                        part += ' FORCE INDEX (%s)' % ', '.join(hint)
                result.append(part)
            first = False
        for t in self.query.extra_tables:
            alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # calls increments the refcount, so an alias refcount of one means
            # this is the only reference.
            if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
                connector = '' if first else ', '
                result.append('%s%s' % (connector, qn(alias)))
                first = False
        return result, from_params
    def get_grouping(self, having_group_by, ordering_group_by):
        """
        Returns a tuple representing the SQL elements in the "group by" clause.

        'having_group_by' and 'ordering_group_by' are extra columns that the
        HAVING clause and the ordering require to appear in GROUP BY.
        May rewrite self.query.group_by to just the PK when the backend
        allows grouping by primary key alone.
        """
        qn = self.quote_name_unless_alias
        result, params = [], []
        if self.query.group_by is not None:
            select_cols = self.query.select + self.query.related_select_cols
            # Just the column, not the fields.
            select_cols = [s[0] for s in select_cols]
            if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
                    and self.connection.features.allows_group_by_pk):
                # All concrete fields are selected: the backend lets us group
                # by the primary key only, so collapse the group-by list.
                self.query.group_by = [
                    (self.query.get_meta().db_table, self.query.get_meta().pk.column)
                ]
                select_cols = []
            seen = set()
            cols = self.query.group_by + having_group_by + select_cols
            for col in cols:
                col_params = ()
                # A column can be a (table, column) pair, an object with
                # as_sql(), or a raw SQL string.
                if isinstance(col, (list, tuple)):
                    sql = '%s.%s' % (qn(col[0]), qn(col[1]))
                elif hasattr(col, 'as_sql'):
                    sql, col_params = col.as_sql(qn, self.connection)
                else:
                    sql = '(%s)' % str(col)
                if sql not in seen:
                    result.append(sql)
                    params.extend(col_params)
                    seen.add(sql)
            # Still, we need to add all stuff in ordering (except if the backend can
            # group by just by PK).
            if ordering_group_by and not self.connection.features.allows_group_by_pk:
                for order, order_params in ordering_group_by:
                    # Even if we have seen the same SQL string, it might have
                    # different params, so, we add same SQL in "has params" case.
                    if order not in seen or order_params:
                        result.append(order)
                        params.extend(order_params)
                        seen.add(order)
            # Unconditionally add the extra_select items.
            for extra_select, extra_params in self.query.extra_select.values():
                sql = '(%s)' % str(extra_select)
                result.append(sql)
                params.extend(extra_params)
        return result, params
    def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
            requested=None, restricted=None, nullable=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).

        Mutates self.query.related_select_cols (and, via setup_joins, the
        query's join structure). Recurses for both forward relations and,
        when 'restricted', for reverse one-to-one relations.
        """
        if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return
        if not opts:
            # First call: start from the root model and reset the output list.
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
            self.query.related_select_cols = []
        only_load = self.query.get_loaded_field_names()
        # Setup for the case when only particular related fields should be
        # included in the related selection.
        if requested is None:
            if isinstance(self.query.select_related, dict):
                requested = self.query.select_related
                restricted = True
            else:
                restricted = False
        for f, model in opts.get_fields_with_model():
            # The get_fields_with_model() returns None for fields that live
            # in the field's local model. So, for those fields we want to use
            # the f.model - that is the field's local model.
            field_model = model or f.model
            if not select_related_descend(f, restricted, requested,
                    only_load.get(field_model)):
                continue
            # Nullable chains need LEFT OUTER joins so rows aren't dropped.
            promote = nullable or f.null
            _, _, _, joins, _ = self.query.setup_joins(
                [f.name], opts, root_alias, outer_if_first=promote)
            alias = joins[-1]
            columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=f.rel.to._meta, as_pairs=True)
            self.query.related_select_cols.extend(
                SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
            # 'next' (shadows the builtin; kept as-is) is the sub-dict of
            # requested relations to descend into below this field.
            if restricted:
                next = requested.get(f.name, {})
            else:
                next = False
            new_nullable = f.null or promote
            self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
                    next, restricted, new_nullable)
        if restricted:
            # Reverse side: only unique (one-to-one style) relations qualify.
            related_fields = [
                (o.field, o.model)
                for o in opts.get_all_related_objects()
                if o.field.unique
            ]
            for f, model in related_fields:
                if not select_related_descend(f, restricted, requested,
                        only_load.get(model), reverse=True):
                    continue
                _, _, _, joins, _ = self.query.setup_joins(
                    [f.related_query_name()], opts, root_alias, outer_if_first=True)
                alias = joins[-1]
                from_parent = (opts.model if issubclass(model, opts.model)
                    else None)
                columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=model._meta, as_pairs=True, from_parent=from_parent)
                self.query.related_select_cols.extend(
                    SelectInfo(col, field) for col, field
                    in zip(columns, model._meta.concrete_fields))
                next = requested.get(f.related_query_name(), {})
                # Use True here because we are looking at the _reverse_ side of
                # the relation, which is always nullable.
                new_nullable = True
                self.fill_related_selections(model._meta, alias, cur_depth + 1,
                        next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Rows are post-processed in two ways: backend-specific column
        resolution (when the subclass defines resolve_columns) and
        aggregate value conversion for the aggregate slice of each row.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if has_aggregate_select:
                    # Aggregates sit after the extra-select and loaded-field
                    # columns; compute that slice's boundaries.
                    loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
                    aggregate_start = len(self.query.extra_select) + len(loaded_fields)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_cols isn't populated until
                        # execute_sql() has been called.
                        # We also include types of fields of related models that
                        # will be included via select_related() for the benefit
                        # of MySQL/MySQLdb when boolean fields are involved
                        # (#15040).
                        # This code duplicates the logic for the order of fields
                        # found in get_columns(). It would be nice to clean this up.
                        if self.query.select:
                            fields = [f.field for f in self.query.select]
                        else:
                            fields = self.query.get_meta().concrete_fields
                        fields = fields + [f.field for f in self.query.related_select_cols]
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            fields = [f for f in fields if f.model._meta.db_table not in only_load or
                                      f.column in only_load[f.model._meta.db_table]]
                        if has_aggregate_select:
                            # pad None in to fields for aggregates
                            fields = fields[:aggregate_start] + [
                                None for x in range(0, aggregate_end - aggregate_start)
                            ] + fields[aggregate_start:]
                    row = self.resolve_columns(row, fields)
                if has_aggregate_select:
                    # Convert the raw DB values in the aggregate slice to
                    # their Python representations.
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])
                yield row
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery). It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            # Empty result set: return an empty iterator (MULTI) or None.
            if result_type == MULTI:
                return iter([])
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            # Trim any trailing ordering-only columns from the single row.
            if self.ordering_aliases:
                return cursor.fetchone()[:-len(self.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case.
        if self.ordering_aliases:
            # Extra ordering columns were selected; trim them per chunk.
            result = order_modified_iter(cursor, len(self.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            # iter(callable, sentinel): yield fetchmany() chunks until the
            # backend's empty value is returned.
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
def as_subquery_condition(self, alias, columns, qn):
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
    """Compiles INSERT statements, including multi-row (bulk) inserts."""

    def __init__(self, *args, **kwargs):
        # When True, as_sql()/execute_sql() arrange for the inserted row's
        # primary key to be returned (single-object inserts only).
        self.return_id = False
        super(SQLInsertCompiler, self).__init__(*args, **kwargs)

    def placeholder(self, field, val):
        """Return the SQL placeholder to use for 'val' on 'field'."""
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'

    def as_sql(self):
        """
        Build the INSERT statement(s). Returns a list of (sql, params)
        pairs: a single pair for bulk/return-id inserts, otherwise one
        pair per object being inserted.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        # With no explicit fields, only the PK's default value is inserted.
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        # Bulk insert needs plain '%s' placeholders, no id return, and
        # backend support.
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
        # Oracle Spatial needs to remove some values due to #10888
        params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behaviour for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]

    def execute_sql(self, return_id=False):
        """
        Execute the insert(s). When 'return_id' is True (only valid for a
        single object), return the new row's primary key value.
        """
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
                self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """Compiles DELETE statements for a single table."""

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        qn = self.quote_name_unless_alias
        where_sql, where_params = self.query.where.as_sql(
            qn=qn, connection=self.connection)
        sql = 'DELETE FROM %s' % qn(self.query.tables[0])
        if where_sql:
            sql = '%s WHERE %s' % (sql, where_sql)
        return sql, tuple(where_params)
class SQLUpdateCompiler(SQLCompiler):
    """Compiles UPDATE statements, including multi-table related updates."""

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                # Model instances know how to serialize themselves for a FK.
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # F() expressions and the like are compiled to SQL below.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                # None is rendered literally so backends see a real NULL.
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)

    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor.rowcount if cursor else 0
        is_empty = cursor is None
        del cursor
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                rows = aux_rows
                is_empty = False
        return rows

    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.

        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            # Single-table update with no related updates: nothing to do.
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query.bump_prefix()
        query.extra = {}
        query.select = []
        query.add_fields([query.get_meta().pk.name])
        # Recheck the count - it is possible that fiddling with the select
        # fields above removes tables from the query. Refs #18304.
        count = query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        # Drop every table but the one being updated from the query.
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    """Compiles aggregate queries that wrap an inner query in a subselect."""

    def as_sql(self, qn=None):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        if qn is None:
            qn = self.quote_name_unless_alias
        fragments = []
        params = []
        # Render every selected aggregate and collect its parameters.
        for aggregate in self.query.aggregate_select.values():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            fragments.append(agg_sql)
            params.extend(agg_params)
        sql = 'SELECT %s FROM (%s) subquery' % (
            ', '.join(fragments), self.query.subquery)
        return sql, tuple(params) + self.query.sub_params
class SQLDateCompiler(SQLCompiler):
    """Compiles queries yielding plain dates (QuerySet.dates())."""

    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        has_resolve_columns = hasattr(self, 'resolve_columns')
        if has_resolve_columns:
            from django.db.models.fields import DateField
            fields = [DateField()]
        else:
            from django.db.backends.util import typecast_date
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        # The requested date is the first column after any extra(select=...).
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                value = row[offset]
                if has_resolve_columns:
                    value = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    value = typecast_date(str(value))
                # Some backends hand back full datetimes; reduce to a date.
                if isinstance(value, datetime.datetime):
                    value = value.date()
                yield value
class SQLDateTimeCompiler(SQLCompiler):
    """Compiles queries yielding datetimes (QuerySet.datetimes())."""

    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Yields timezone-aware datetimes when settings.USE_TZ is enabled.
        Raises ValueError if the database returned NULL for the datetime
        column, which usually means time zone definitions/pytz are missing.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        # The requested datetime is the first column after extra(select=...).
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                # Bug fix: the original local was named `datetime`, shadowing
                # the datetime module inside this method. Renamed to `dt`.
                dt = row[offset]
                if resolve_columns:
                    dt = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    dt = typecast_timestamp(str(dt))
                # Datetimes are artificially returned in UTC on databases that
                # don't support time zone. Restore the zone used in the query.
                if settings.USE_TZ:
                    if dt is None:
                        raise ValueError("Database returned an invalid value "
                                         "in QuerySet.dates(). Are time zone "
                                         "definitions and pytz installed?")
                    dt = dt.replace(tzinfo=None)
                    dt = timezone.make_aware(dt, self.query.tzinfo)
                yield dt
def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor. We use this iterator in the special
    case when extra output columns have been added to support ordering
    requirements. We must trim those extra columns before anything else can use
    the results, since they're only needed to make the SQL valid.
    """
    fetch_chunk = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for rows in iter(fetch_chunk, sentinel):
        # Drop the trailing ordering-only columns from every row.
        yield [row[:-trim] for row in rows]
| [
"edisonlz@163.com"
] | edisonlz@163.com |
989e083986e3aa86a4f9adc4374a6107b1e2010d | cb6b6ee58dbee4604058a3e44142d0ffb7906ba8 | /pypif/obj/common/__init__.py | 7fef72a853f1162fbd7c9a5cb446b86be6fdfaaf | [] | no_license | nad2000/pypif | a3991dab9bf3e0c558a2891b301c3dec494096a6 | dc9923792f91c53ac649b403620a387e1d86cb83 | refs/heads/master | 2020-04-07T07:43:57.980477 | 2016-01-13T21:54:58 | 2016-01-13T21:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | from id import Id
from instrument import Instrument
from license import License
from method import Method
from name import Name
from pages import Pages
from person import Person
from process_step import ProcessStep
from property import Property
from reference import Reference
from scalar import Scalar
from software import Software
from value import Value
| [
"kyle@citrine.io"
] | kyle@citrine.io |
1bcb4661e824eb62e799bf34da49651df50f914b | 066395d7991fb1b1fdf09ef3d6dc6d0a15421590 | /data_collection_processing/preprocess-nostem.py | d079cbcef5976cedc145344c067f4c745af218a4 | [] | no_license | taojin1992/Automated-confirmation-of-protein-function-annotation-using-NLP | cf57beb2e66cec335c49fe3a586e992e65ff5014 | 4d51899b83d9478b4df5145dc266c1c8e4ad7fba | refs/heads/master | 2021-07-13T07:19:45.845437 | 2021-02-19T18:24:28 | 2021-02-19T18:24:28 | 237,134,224 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | ''' text preprocessing pipeline'''
''' Links:
https://stackoverflow.com/questions/8009882/how-to-read-a-large-file-line-by-line-in-python
https://machinelearningmastery.com/clean-text-machine-learning-python/
https://stackoverflow.com/questions/14301056/concatenating-lists-in-python-3
https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
https://stackoverflow.com/questions/7165749/open-file-in-a-relative-location-in-python
https://stackoverflow.com/questions/3964681/find-all-files-in-a-directory-with-extension-txt-in-python
https://stackoverflow.com/questions/9495007/indenting-code-in-sublime-text-2
'''
from nltk import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from nltk.stem.porter import PorterStemmer
import os
from os import listdir
from os.path import isfile, join
# English stop words filtered from every abstract.
stop_words = stopwords.words('english')
# Stemmer kept for parity with the stemmed pipeline; intentionally unused
# in this "no stemming" variant.
porter = PorterStemmer()

# Resolve the data directory relative to this script, not the CWD.
script_dir = os.path.dirname(__file__)
rel_path = "TJ-data-2"
abs_file_path = os.path.join(script_dir, rel_path)

# Portability fix: write cleaned output next to the input data instead of a
# hard-coded user-specific absolute path, and make sure the directory exists.
output_dir = os.path.join(abs_file_path, "cleaned-nostem")
os.makedirs(output_dir, exist_ok=True)

# Every *.txt file in the data directory is one corpus file (one abstract
# per line).
onlyfiles = [f for f in listdir(abs_file_path)
             if isfile(join(abs_file_path, f)) and f.endswith(".txt")]

# Build the punctuation-stripping table once instead of once per sentence.
punct_table = str.maketrans('', '', string.punctuation)

for text_file in onlyfiles:
    filename = join(abs_file_path, text_file)
    out_path = os.path.join(output_dir, text_file + "-prep")
    # Efficiency fix: open the output file once per input file instead of
    # re-opening it in append mode for every single line.
    with open(filename) as f, open(out_path, "a") as myfile:
        for line in f:
            paragraph = []
            # Split the abstract into sentences, then tokenize each one.
            for sentence in sent_tokenize(line):
                tokens = word_tokenize(sentence)
                # Lower-case, strip punctuation, keep alphabetic tokens,
                # then drop stop words.
                tokens = [w.lower() for w in tokens]
                stripped = [w.translate(punct_table) for w in tokens]
                words = [word for word in stripped if word.isalpha()]
                words = [w for w in words if w not in stop_words]
                # No stemming in this variant of the pipeline:
                # stemmed = [porter.stem(word) for word in words]
                paragraph.extend(words)
            # One cleaned line per input line: tokens separated by spaces
            # (trailing space before the newline preserved for parity with
            # the original output format).
            for item in paragraph:
                myfile.write(item + " ")
            myfile.write("\n")
| [
"noreply@github.com"
] | noreply@github.com |
17c2f3c89f14097acdd060fbfc80357877b1e2f6 | 9d2d9bd58f2daaa9a78c5a5c440a0a5c2b307f28 | /iterator/pancakeMenuIterator.py | 5466ba8363d6e7200cc46f3e3b9e4ded55350cf1 | [] | no_license | dRoje/design-patterns-python | 183c5063b19fb11b5152f21c863fe71e249dd94d | 4eb0438434a33c1b70c22c13f2f9c602a479d7e7 | refs/heads/master | 2021-07-20T12:04:59.368214 | 2017-10-30T09:33:31 | 2017-10-30T09:33:31 | 105,116,358 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from iterator import Iterator
from menuItem import MenuItem
from typing import List
class PancakeMenuIterator(Iterator):
def __init__(self, items):
# type: (List(MenuItem)) -> None
assert isinstance(items, list)
self.items = items
self.position = 0
def next(self):
menuItem = self.items[self.position]
self.position += 1
return menuItem
def hasNext(self):
if self.position >= self.items.__len__() or self.items[self.position] is None:
return False
else:
return True
| [
"duje.roje@include.hr"
] | duje.roje@include.hr |
a8de5acec73280780ad78ec48bcbd2682e922e37 | f65ceefa94fef770b091566f9c6876efb25f84e3 | /structure_ex/structure_test.py | 280cc28e239c7b03913d16604c6b10e51e3dc01c | [
"BSD-2-Clause"
] | permissive | mpentek/pyKratos | df98402673f56799d8546dacc145b90da63f8a74 | 40da2c8632bdf3215b54c3324bfe58a212a9c6c7 | refs/heads/master | 2020-08-29T09:42:17.450745 | 2019-11-27T09:23:03 | 2019-11-27T09:23:03 | 217,996,491 | 0 | 0 | BSD-2-Clause | 2019-10-28T08:12:52 | 2019-10-28T08:12:51 | null | UTF-8 | Python | false | false | 2,796 | py | from __future__ import print_function, absolute_import, division
import sys
sys.path.append("..")
print(sys.path)
from numpy import *
from pyKratos import *
# add variables to be allocated from the list in variables.py
solution_step_variables = [
DISPLACEMENT_X,
DISPLACEMENT_Y,
VELOCITY_X,
VELOCITY_Y,
IS_LAGRANGIAN,
]
# 9 - 8
# | / |
# 7 - 6
# | / |
# 5 - 4
# | / |
# 3 - 2
# | /|
# | / |
# 0 - 1
node_list = {
0: array([0.0, 0.0]),
1: array([0.1, 0.0]),
2: array([0.1, 0.2]),
3: array([0.0, 0.2]),
4: array([0.1, 0.3]),
5: array([0.0, 0.3]),
6: array([0.1, 0.4]),
7: array([0.0, 0.4]),
8: array([0.1, 0.5]),
9: array([0.0, 0.5]),
}
property_list = {
0: {YOUNG_MODULUS: 200e6,
POISSON_RATIO: 0.3,
DENSITY: 1000.0,
BODY_FORCE_X: 10.0,
BODY_FORCE_Y: 0.0,
}
}
element_connectivities = {
1: [0, [0, 1, 2]],
2: [0, [0, 2, 3]],
3: [0, [2, 4, 3]],
4: [0, [3, 4, 5]],
5: [0, [5, 4, 6]],
6: [0, [5, 6, 7]],
7: [0, [7, 6, 8]],
8: [0, [7, 8, 9]],
}
nodal_values = {
VELOCITY_X: [
[0, True, 0.0], # first column is Id of node, second col if fixity, third is imposed value
[1, True, 0.0],
],
VELOCITY_Y: [
[0, True, 0.0],
[1, True, 0.0],
],
}
buffer_size = 3 # store current step and 2 in the past
# Assemble the model: nodes, properties, elements and boundary conditions.
model_part = ModelPart(buffer_size, solution_step_variables)
model_part.AddNodes(node_list)
model_part.AddProperties(property_list)
model_part.AddElements("triangular_elastic_element", element_connectivities)
model_part.AddNodalValues(nodal_values)
# Time integration scheme (Gear / BDF-style, hence the 2-step history buffer).
import gear_scheme
time_scheme = gear_scheme.GearScheme(model_part)
builder_and_solver = builder_and_solver.BuilderAndSolver(
    model_part, time_scheme)
strategy = solving_strategy.SolvingStrategy(
    model_part, time_scheme, builder_and_solver)
strategy.Initialize()
# Histories for post-processing (only used by the commented-out plotting below).
t = []
d = []
zero_based_indices_for_nodes = True
# GiD post-processing output: write the mesh once, then results per step.
GiDIO = gid_io.GidIO("inputfile.mdpa","gid_out",zero_based_indices_for_nodes)
GiDIO.WriteMesh(model_part,"outmesh")
# Time-stepping loop: advance, solve, and dump nodal results each step.
dt = 0.0005
nsteps = 100
for i in range(1,nsteps):
    time = i*dt
    model_part.CloneTimeStep(time)
    print("time: {:.6}".format(time))
    strategy.Solve()
    GiDIO.WriteNodalResults(DISPLACEMENT,model_part.NodeIterators(), time)
    GiDIO.WriteNodalResults(VELOCITY,model_part.NodeIterators(), time)
#     outname = "disp"+str(time)+".png"
#     plot_contour.PlotContour(model_part.NodeIterators(), DISPLACEMENT_X, outname )
#
#     outname = "vx"+str(time)+".png"
#     plot_contour.PlotContour(model_part.NodeIterators(), VELOCITY_X, outname )
#     t.append(time)
#     d.append(model_part.Nodes[9].GetSolutionStepValue(DISPLACEMENT_X,0))
#print(d)
#import matplotlib.pyplot as plt
#plt.plot(t,d)
#plt.show()
"rrossi@cimne.upc.edu"
] | rrossi@cimne.upc.edu |
36171b70c039e3c0d5bbc34dc72349b52a130598 | 0da42e858276eff153999acf3c9abfa0c1a847f4 | /HackerRank/30_Days_of_coding_Day12_Inheritance.py | d4d4643211c8a03e4905d38c866e8e6b2ba88da0 | [
"MIT"
] | permissive | utkarshrutgers/tech-interview-prep | 0efbe5a5c531259db957928f69370d48c681ca22 | f1cb747cab2d0930d8d69d860806991ea09e3e78 | refs/heads/master | 2020-04-22T02:33:47.292090 | 2019-03-21T12:27:12 | 2019-03-21T12:27:12 | 170,054,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | class Person:
    def __init__(self, firstName, lastName, idNumber):
        """Store the person's first name, last name and ID number."""
        self.firstName = firstName
        self.lastName = lastName
        self.idNumber = idNumber
    def printPerson(self):
        """Print the person's name as "Last, First" and their ID to stdout."""
        print("Name:", self.lastName + ",", self.firstName)
        print("ID:", self.idNumber)
class Student(Person):
    """A Person with a list of test scores and a letter-grade calculation.

    HackerRank "30 Days of Code", Day 12 (Inheritance).
    """

    def __init__(self, f, l, idn, scores):
        """Build a Student.

        f      -- first name (string)
        l      -- last name (string)
        idn    -- ID number
        scores -- list of integer test scores (must be non-empty to grade)
        """
        super().__init__(f, l, idn)
        self.scores = scores

    def calculate(self):
        """Return the letter grade for the average of self.scores.

        Grading scale:
            O: 90 <= avg        E: 80 <= avg < 90    A: 70 <= avg < 80
            P: 55 <= avg < 70   D: 40 <= avg < 55    T: avg < 40

        Raises ValueError if there are no scores (the original code would
        have raised ZeroDivisionError instead).
        """
        if not self.scores:
            raise ValueError("cannot grade a student with no scores")
        # sum()/len() replaces the original manual index loop.
        avg = sum(self.scores) / len(self.scores)
        # Ordered guard chain: equivalent to the original overlapping
        # range checks (boundary values resolve to the higher grade).
        if avg >= 90:
            return 'O'
        if avg >= 80:
            return 'E'
        if avg >= 70:
            return 'A'
        if avg >= 55:
            return 'P'
        if avg >= 40:
            return 'D'
        return 'T'
# Read "firstName lastName idNumber" from the first line of stdin.
line = input().split()
firstName = line[0]
lastName = line[1]
idNum = line[2]
# The score count is consumed to stay in sync with the input format,
# but split() below does not need it.
numScores = int(input()) # not needed for Python
scores = list( map(int, input().split()) )
# Build the student, echo the inherited Person fields, then print the grade.
s = Student(firstName, lastName, idNum, scores)
s.printPerson()
print("Grade:", s.calculate())
| [
"noreply@github.com"
] | noreply@github.com |
06b73b205e1b8bcfe3abcb46015a46db42931544 | 74cf86509c669799a3a7ed4b7982d59dde695230 | /pilot_paper_code/plotting_code/plotVel_vs_b.py | a97e15c7083695a8923df0db809d5dd2efab34ad | [] | no_license | frenchd24/pilot_paper | e77103ec4873758474f9020c76a8dad86fc6519c | a8d9191f9e435e02a8f6acfbd85ede32bdfd405d | refs/heads/master | 2020-05-20T06:54:13.266061 | 2019-05-07T17:08:42 | 2019-05-07T17:08:42 | 185,438,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,499 | py | #!/usr/bin/env python
'''
By David French (frenchd@astro.wisc.edu)
$Id: plotVel_vs_b.py, v 1.1 04/21/2016
Plot the velocity and velocity difference as a function of dopplar b-parameter
- (03/02/2016)
v1.1: remake plots with v_hel instead of vcorr (4/21/16)
'''
import sys
import os
import csv
from pylab import *
# import atpy
from math import *
from utilities import *
import getpass
import pickle
# from astropy.io.votable import parse,tree
# from vo.table import parse
# import vo.tree
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ## for Palatino and other serif fonts use:
# #rc('font',**{'family':'serif','serif':['Palatino']})
# rc('text', usetex=True)
###########################################################################
def main():
# assuming 'theFile' contains one name per line, read the file
if getpass.getuser() == 'David':
pickleFilename = '/Users/David/Research_Documents/inclination/git_inclination/pilot_paper_code/pilotData2.p'
# resultsFilename = '/Users/David/Research_Documents/inclination/git_inclination/LG_correlation_combined5_8_edit2.csv'
# saveDirectory = '/Users/David/Research_Documents/inclination/git_inclination/pilot_paper_code/plots2/'
resultsFilename = '/Users/David/Research_Documents/inclination/git_inclination/LG_correlation_combined5_9_edit2.csv'
saveDirectory = '/Users/David/Research_Documents/inclination/git_inclination/pilot_paper_code/plots3/'
elif getpass.getuser() == 'frenchd':
pickleFilename = '/usr/users/frenchd/inclination/git_inclination/pilot_paper_code/pilotData2.p'
# resultsFilename = '/usr/users/frenchd/inclination/git_inclination/LG_correlation_combined5_8_edit2.csv'
# saveDirectory = '/usr/users/frenchd/inclination/git_inclination/pilot_paper_code/plots2/'
resultsFilename = '/usr/users/frenchd/inclination/git_inclination/LG_correlation_combined5_9_edit2.csv'
saveDirectory = '/usr/users/frenchd/inclination/git_inclination/pilot_paper_code/plots3/'
else:
print 'Could not determine username. Exiting.'
sys.exit()
# use the old pickle file to get the full galaxy dataset info
pickleFile = open(pickleFilename,'rU')
fullDict = pickle.load(pickleFile)
pickleFile.close()
# save each plot?
save = False
results = open(resultsFilename,'rU')
reader = csv.DictReader(results)
virInclude = False
cusInclude = False
finalInclude = True
# if match, then the includes in the file have to MATCH the includes above. e.g., if
# virInclude = False, cusInclude = True, finalInclude = False, then only systems
# matching those three would be included. Otherwise, all cusInclude = True would be included
# regardless of the others
match = False
# all the lists to be used for associated lines
lyaVList = []
lyaWList = []
lyaErrList = []
naList = []
bList = []
impactList = []
azList = []
incList = []
fancyIncList = []
cosIncList = []
cosFancyIncList = []
paList = []
vcorrList = []
majList = []
difList = []
envList = []
morphList = []
m15List = []
virList = []
likeList = []
likem15List = []
# for ambiguous lines
lyaVAmbList = []
lyaWAmbList = []
envAmbList = []
for l in reader:
include_vir = eval(l['include_vir'])
include_cus = eval(l['include_custom'])
include = eval(l['include'])
go = False
if match:
if virInclude == include_vir and cusInclude == include_cus:
go = True
else:
go = False
else:
if virInclude and include_vir:
go = True
elif cusInclude and include_cus:
go = True
elif finalInclude and include:
go = True
else:
go = False
if go:
AGNra_dec = eval(l['degreesJ2000RA_DecAGN'])
galaxyRA_Dec = eval(l['degreesJ2000RA_DecGalaxy'])
lyaV = l['Lya_v']
lyaW = l['Lya_W'].partition('pm')[0]
lyaW_err = l['Lya_W'].partition('pm')[2]
env = l['environment']
galaxyName = l['galaxyName']
impact = l['impactParameter (kpc)']
galaxyDist = l['distGalaxy (Mpc)']
pa = l['positionAngle (deg)']
RC3pa = l['RC3pa (deg)']
morph = l['morphology']
vcorr = l['vcorrGalaxy (km/s)']
maj = l['majorAxis (kpc)']
minor = l['minorAxis (kpc)']
inc = l['inclination (deg)']
az = l['azimuth (deg)']
b = l['b'].partition('pm')[0]
b_err = l['b'].partition('pm')[2]
na = eval(l['Na'].partition(' pm ')[0])
# print "l['Na'].partition(' pm ')[2] : ",l['Na'].partition(' pm ')
na_err = eval(l['Na'].partition(' pm ')[2])
likelihood = l['likelihood']
likelihoodm15 = l['likelihood_1.5']
virialRadius = l['virialRadius']
m15 = l['d^1.5']
vel_diff = l['vel_diff']
if isNumber(inc):
cosInc = cos(float(inc) * pi/180.)
if isNumber(maj) and isNumber(minor):
q0 = 0.2
fancyInc = calculateFancyInclination(maj,minor,q0)
cosFancyInc = cos(fancyInc * pi/180)
else:
fancyInc = -99
cosFancyInc = -99
else:
cosInc = -99
inc = -99
fancyInc = -99
cosFancyInc = -99
if isNumber(pa):
pa = float(pa)
elif isNumber(RC3pa):
pa = float(RC3pa)
else:
pa = -99
if isNumber(az):
az = float(az)
else:
az = -99
if isNumber(maj):
maj = float(maj)
virialRadius = float(virialRadius)
else:
maj = -99
virialRadius = -99
if isNumber(b):
b = float(b)
else:
b = -99
# all the lists to be used for associated lines
lyaVList.append(float(lyaV))
lyaWList.append(float(lyaW))
lyaErrList.append(float(lyaW_err))
naList.append(na)
bList.append(float(b))
impactList.append(float(impact))
azList.append(az)
incList.append(float(inc))
fancyIncList.append(fancyInc)
cosIncList.append(cosInc)
cosFancyIncList.append(cosFancyInc)
paList.append(pa)
vcorrList.append(vcorr)
majList.append(maj)
difList.append(float(vel_diff))
envList.append(float(env))
morphList.append(morph)
m15List.append(m15)
virList.append(virialRadius)
likeList.append(likelihood)
likem15List.append(likelihoodm15)
else:
lyaV = l['Lya_v']
lyaW = l['Lya_W'].partition('pm')[0]
lyaW_err = l['Lya_W'].partition('pm')[2]
env = l['environment']
lyaVAmbList.append(float(lyaV))
lyaWAmbList.append(float(lyaW))
envAmbList.append(float(env))
results.close()
# lists for the full galaxy dataset
allPA = fullDict['allPA']
allInclinations = fullDict['allInclinations']
allCosInclinations = fullDict['allCosInclinations']
allFancyInclinations = fullDict['allFancyInclinations']
allCosFancyInclinations = fullDict['allCosFancyInclinations']
total = 0
totalNo = 0
totalYes = 0
totalIsolated = 0
totalGroup = 0
########################################################################################
########################################################################################
# plot velocity difference as a function of dopplar b-parameter
#
plotVeldif_vs_b = True
save = False
if plotVeldif_vs_b:
fig = figure()
ax = fig.add_subplot(111)
countb = 0
countr = 0
count = -1
labelr = 'Redshifted Absorber'
labelb = "Blueshifted Absorber"
for d,b,w,v in zip(difList,bList,lyaWList,virList):
# check if all the values are okay
if isNumber(d) and isNumber(b) and isNumber(w) and isNumber(v):
if d!=-99 and b!=-99 and w!=-99 and v!=-99:
if d>0:
# galaxy is behind absorber, so gas is blue shifted
color = 'Blue'
if countb == 0:
countb +=1
plotb = ax.scatter(b,d,c='Blue',s=50,label= labelb)
if d<0:
# gas is red shifted compared to galaxy
color = 'Red'
if countr == 0:
countr +=1
plotr = ax.scatter(b,abs(d),c='Red',s=50,label= labelr)
plot1 = scatter(b,abs(d),c=color,s=50)
xlabel(r'$b$ [km/s]')
ylabel(r'$\rm \Delta v (km/s)$')
legend(scatterpoints=1)
ax.grid(b=None,which='major',axis='both')
ylim(0,400)
# xlim(0,max(virList)+10)
# ylim(0,max(lyaWList)+50)
# xlim(0,10)
if save:
savefig('{0}/vel_dif(b).pdf'.format(saveDirectory),format='pdf')
else:
show()
########################################################################################
########################################################################################
# plot dopplar b-parameter as a function of Lyalpha absorption velocity
#
plotVel_vs_b = False
save = False
if plotVel_vs_b:
fig = figure()
ax = fig.add_subplot(111)
countb = 0
countr = 0
count = -1
labelr = 'Redshifted Absorber'
labelb = "Blueshifted Absorber"
# give some stats:
print 'bList: ',bList
for d,v,b,vir in zip(difList,lyaVList,bList,virList):
# check if all the values are okay
if isNumber(d) and isNumber(v) and isNumber(b) and isNumber(vir):
if d!=-99 and v!=-99 and b!=-99 and vir!=-99:
if d>0:
# galaxy is behind absorber, so gas is blue shifted
color = 'Blue'
if countb == 0:
countb +=1
plotb = ax.scatter(v,b,c='Blue',s=50,label= labelb)
if d<0:
# gas is red shifted compared to galaxy
color = 'Red'
if countr == 0:
countr +=1
plotr = ax.scatter(v,b,c='Red',s=50,label= labelr)
plot1 = scatter(v,b,c=color,s=50)
xlabel(r'$\rm v_{Ly\alpha} (km/s)$')
ylabel(r'$b$ [km/s]')
legend(scatterpoints=1)
ax.grid(b=None,which='major',axis='both')
# ylim(0,400)
# xlim(0,max(envList)+1)
if save:
savefig('{0}/b_(vel).pdf'.format(saveDirectory),format='pdf')
else:
show()
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
# Entry point: run the plotting analysis only when executed as a script,
# not when imported as a module.
if __name__=="__main__":
    # do the work
    main()
| [
"frenchd24@gmail.com"
] | frenchd24@gmail.com |
22b0dd663c07ff294d1a3fe025edea0bcad273d5 | f63e0d8bf6322f1e7361153f42fdc22dd360adb1 | /publishconf.py | 2a6871be81fc2745039ed3250e348850d658b0ec | [] | no_license | missfall/lnmp100.github.io | c9c0658460c3ba8be1a5646c23d3cdef38b27b26 | 24150f076f2d333808871b106a73474ad2541def | refs/heads/master | 2021-05-28T20:05:25.735844 | 2015-03-30T06:27:16 | 2015-03-30T06:27:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://www.lnmp100.com'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| [
"quanlongwang@gmail.com"
] | quanlongwang@gmail.com |
02a5188a89737dca40113ba69cfee3a1d86f8993 | aa2a8eb6aad4aa9336f122ec171ebfc6e5b9d79c | /Milestone 4/stock_analysis.py | 8ac6bbca4fb8411ca9be2d1d5fdfc4bcba2677e4 | [] | no_license | hafiz-hussin/hafiz-hussin | d0afea52cbae26858ee5bb8d718562f50eaf112e | 8886d6ce3ff4b456339939b316a89ba1efb483d0 | refs/heads/master | 2020-05-30T15:51:33.929216 | 2019-06-02T10:31:48 | 2019-06-02T10:31:48 | 189,830,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,948 | py | # Libraries
import pandas as pd
import numpy as np
import fbprophet
from pytrends.request import TrendReq
import pandas_datareader as wb
import matplotlib.pyplot as plt
import matplotlib
# Main class for data analysis
class Stocker():
# Initialization requires a ticker symbol
def __init__(self, ticker):
# Enforce capitalization
ticker = ticker.upper()
# Symbol is used for labeling plots
self.symbol = ticker
# Retrieval the financial data
# CIMB = "1023.KL"
try:
stock = wb.get_data_yahoo (ticker, start='2017-1-1', end='2018-1-1')
except Exception as e:
print('Error Retrieving Data.')
print(e)
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
stock['y'] = stock['Adj Close']
stock['Daily Change'] = stock['Adj Close'] - stock['Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
# Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
self.starting_price = float(self.stock.ix[0, 'Open'])
# The most recent price
self.most_recent_price = float(self.stock.ix[len(self.stock) - 1, 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 1
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
print('{} Stocker Initialized. Data covers {} to {}.'.format(self.symbol,
self.min_date.date(),
self.max_date.date()))
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date.date() < start_date.date():
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date = pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date.date() > self.max_date.date():
print('End Date exceeds data range')
end_date = pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date.date() < self.min_date.date():
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
if not df:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
else:
# If both are in dataframe, round neither
if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date.date()) &
(df['Date'] <= end_date.date())]
# If only end is imssing round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] < end_date.date())]
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
if (start_date not in list(df['Date'])):
print('Start Date not in data (either out of range or not a trading day.)')
start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
elif (end_date not in list(df['Date'])):
print('End Date not in data (either out of range or not a trading day.)')
end_date = pd.to_datetime(input(prompt='Enter a new end date: '))
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date.date()) &
(df['Date'] <= end_date.date())]
return trim_df
def reset_plot(self):
# Restore default parameters
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
# Adjust a few parameters to liking
matplotlib.rcParams['figure.figsize'] = (8, 5)
matplotlib.rcParams['axes.labelsize'] = 10
matplotlib.rcParams['xtick.labelsize'] = 8
matplotlib.rcParams['ytick.labelsize'] = 8
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['text.color'] = 'k'
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
if (date.weekday()) == 5 | (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe
# Basic Historical Plots and Basic Statistics
def plot_stock(self, start_date=None, end_date=None, stats=['Adj Close'], plot_type='basic'):
self.reset_plot()
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
colors = ['r', 'b', 'g', 'y', 'c', 'm']
for i, stat in enumerate(stats):
stat_min = min(stock_plot[stat])
stat_max = max(stock_plot[stat])
stat_avg = np.mean(stock_plot[stat])
date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
date_stat_min = date_stat_min[date_stat_min.index[0]].date()
date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
date_stat_max = date_stat_max[date_stat_max.index[0]].date()
print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.ix[len(self.stock) - 1, stat],
self.max_date.date()))
# Percentage y-axis
if plot_type == 'pct':
# Simple Plot
plt.style.use('fivethirtyeight');
if stat == 'Daily Change':
plt.plot(stock_plot['Date'], 100 * stock_plot[stat],
color=colors[i], linewidth=2.4, alpha=0.9,
label=stat)
else:
plt.plot(stock_plot['Date'], 100 * (stock_plot[stat] - stat_avg) / stat_avg,
color=colors[i], linewidth=2.4, alpha=0.9,
label=stat)
plt.xlabel('Date');
plt.ylabel('Change Relative to Average (%)');
plt.title('%s Stock History' % self.symbol);
plt.legend(prop={'size': 10})
plt.grid(color='k', alpha=0.4);
# Stat y-axis
elif plot_type == 'basic':
plt.style.use('fivethirtyeight');
plt.plot(stock_plot['Date'], stock_plot[stat], color=colors[i], linewidth=3, label=stat, alpha=0.8)
plt.xlabel('Date');
plt.ylabel('RM');
plt.title('%s Stock History' % self.symbol);
plt.legend(prop={'size': 10})
plt.grid(color='k', alpha=0.4);
plt.show();
def retrieve_google_trends(self, search, date_range):
# Set up the trend fetching object
pytrends = TrendReq(hl='en-US', tz=360)
kw_list = [search]
try:
# Create the search object
pytrends.build_payload(kw_list, cat=0, timeframe=date_range[0], geo='', gprop='news')
# Retrieve the interest over time
trends = pytrends.interest_over_time()
related_queries = pytrends.related_queries()
except Exception as e:
print('\nGoogle Search Trend retrieval failed.')
print(e)
return
return trends, related_queries
# Create a prophet model without training
def create_model(self):
# Make the model
model = fbprophet.Prophet(daily_seasonality=self.daily_seasonality,
weekly_seasonality=self.weekly_seasonality,
yearly_seasonality=self.yearly_seasonality,
changepoint_prior_scale=self.changepoint_prior_scale,
changepoints=self.changepoints)
if self.monthly_seasonality:
# Add monthly seasonality
model.add_seasonality(name='monthly', period=30.5, fourier_order=5)
return model
def change_analysis(self, count):
self.reset_plot()
model = self.create_model()
train = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years=self.training_years)).date()]
# train = self.stock
model.fit(train)
# Predictions of the training data (no future periods)
future = model.make_future_dataframe(periods=0, freq='D')
future = model.predict(future)
train = pd.merge(train, future[['ds', 'yhat']], on='ds', how='inner')
train.to_csv('train_cimb.csv')
changepoints = model.changepoints
train = train.reset_index(drop=True)
# Create dataframe of only changepoints
change_indices = []
for changepoint in (changepoints):
change_indices.append(train[train['ds'] == changepoint.date()].index[0])
c_data = train.loc[change_indices, :]
deltas = model.params['delta'][0]
c_data['delta'] = deltas
c_data['abs_delta'] = abs(c_data['delta'])
# c_data.to_csv('cimb_changes.csv')
# Sort the values by maximum change
c_data = c_data.sort_values(by='abs_delta', ascending=False)
# Print changes
print(c_data[:count])
def changepoint_date_analysis(self, search=None):
self.reset_plot()
model = self.create_model()
# Use past self.training_years years of data
train = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years=self.training_years)).date()]
# train = self.stock
model.fit(train)
# Predictions of the training data (no future periods)
future = model.make_future_dataframe(periods=0, freq='D')
future = model.predict(future)
train = pd.merge(train, future[['ds', 'yhat']], on='ds', how='inner')
changepoints = model.changepoints
train = train.reset_index(drop=True)
# Create dataframe of only changepoints
change_indices = []
for changepoint in (changepoints):
change_indices.append(train[train['ds'] == changepoint.date()].index[0])
c_data = train.ix[change_indices, :]
deltas = model.params['delta'][0]
c_data['delta'] = deltas
c_data['abs_delta'] = abs(c_data['delta'])
# Sort the values by maximum change
c_data = c_data.sort_values(by='abs_delta', ascending=False)
# Limit to 10 largest changepoints
c_data = c_data[:10]
# Separate into negative and positive changepoints
cpos_data = c_data[c_data['delta'] > 0]
cneg_data = c_data[c_data['delta'] < 0]
# Changepoints and data
if not search:
print('\nChangepoints sorted by slope rate of change (2nd derivative):\n')
print(c_data.ix[:, ['Date', 'Close', 'delta']][:5])
# Line plot showing actual values, estimated values, and changepoints
self.reset_plot()
# Set up line plot
plt.plot(train['ds'], train['y'], 'ko', ms=4, label='Stock Price')
plt.plot(future['ds'], future['yhat'], color='navy', linewidth=2.0, label='Modeled')
# Changepoints as vertical lines
plt.vlines(cpos_data['ds'].dt.to_pydatetime(), ymin=min(train['y']), ymax=max(train['y']),
linestyles='dashed', color='r',
linewidth=1.2, label='Negative Changepoints')
plt.vlines(cneg_data['ds'].dt.to_pydatetime(), ymin=min(train['y']), ymax=max(train['y']),
linestyles='dashed', color='darkgreen',
linewidth=1.2, label='Positive Changepoints')
plt.legend(prop={'size': 10});
plt.xlabel('Date');
plt.ylabel('Price (RM)');
plt.title('Stock Price with Changepoints')
plt.show()
# Search for search term in google news
# Show related queries, rising related queries
# Graph changepoints, search frequency, stock price
if search:
date_range = ['%s %s' % (str(min(train['Date']).date()), str(max(train['Date']).date()))]
# Get the Google Trends for specified terms and join to training dataframe
trends, related_queries = self.retrieve_google_trends(search, date_range)
if (trends is None) or (related_queries is None):
print('No search trends found for %s' % search)
return
print('\n Top Related Queries: \n')
print(related_queries[search]['top'].head())
print(related_queries[search])
print('\n Rising Related Queries: \n')
print(related_queries[search]['rising'].head())
# Upsample the data for joining with training data
trends = trends.resample('D')
# print("trends intrrpolate")
# print(trends.interpolate())
# Interpolate the frequency
trends = trends.interpolate()
# trends.reset_index(level=0)
trends = trends.reset_index()
trends.reset_index(inplace=True)
trends = trends.rename(columns={'date': 'ds', search: 'freq'})
# Merge with the training data
train = pd.merge(train, trends, on='ds', how='inner')
# Normalize values
train['y_norm'] = train['y'] / max(train['y'])
train['freq_norm'] = train['freq'] / max(train['freq'])
self.reset_plot()
# Plot the normalized stock price and normalize search frequency
plt.plot(train['ds'], train['y_norm'], 'k-', label='Stock Price')
plt.plot(train['ds'], train['freq_norm'], color='goldenrod', label='Search Frequency')
# Changepoints as vertical lines
plt.vlines(cpos_data['ds'].dt.to_pydatetime(), ymin=0, ymax=1,
linestyles='dashed', color='r',
linewidth=1.2, label='Negative Changepoints')
plt.vlines(cneg_data['ds'].dt.to_pydatetime(), ymin=0, ymax=1,
linestyles='dashed', color='darkgreen',
linewidth=1.2, label='Positive Changepoints')
# Plot formatting
plt.legend(prop={'size': 10})
plt.xlabel('Date');
plt.ylabel('Normalized Values');
plt.title('%s Stock Price and Search Frequency for %s' % (self.symbol, search))
plt.show()
# train.to_csv('train_analysis.csv')
def changepoint_news_analysis(self):
self.reset_plot()
model = self.create_model()
# Use past self.training_years years of data
train = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years=self.training_years)).date()]
# train = self.stock
model.fit(train)
# Predictions of the training data (no future periods)
future = model.make_future_dataframe(periods=0, freq='D')
future = model.predict(future)
train = pd.merge(train, future[['ds', 'yhat']], on='ds', how='inner')
train = train.reset_index(drop=True)
# Line plot showing actual values, estimated values, and changepoints
self.reset_plot()
# Import news
news = pd.read_csv('cimb_news_cimb_news.csv')
print(news.head(10))
train = pd.read_csv('plot_news.csv')
# print(train.head(20))
# seperate into 1 and 0 news
cpos_data = news[news['Score'] == 1]
cneu_data = news[news['Score'] == 0]
plt.plot(train['ds'], train['y'], 'ko', ms=4, label='Stock Price')
plt.plot(train['ds'], train['yhat'], color='navy', linewidth=2.0, label='Modeled')
# plt.show()
# df["TimeReviewed"] = pd.to_datetime(df["TimeReviewed"])
cpos_data['Date'] = pd.to_datetime(cpos_data['Date'])
cneu_data['Date'] = pd.to_datetime(cneu_data['Date'])
# Changepoints as vertical lines
plt.vlines(cpos_data["Date"].astype(str), ymin=min(train['y']), ymax=max(train['y']),
linestyles='dashed', color='r',
linewidth=1.2, label='Positive News')
# plt.vlines(cneu_data['Date'].astype(str), ymin=min(train['y']), ymax=max(train['y']),
# linestyles='dashed', color='darkgreen',
# linewidth=1.2, label='Neutral News')
plt.legend(prop={'size': 10});
plt.xlabel('Date');
plt.ylabel('Price (RM)');
plt.title('Stock Price with News')
plt.show()
# test Stocker Class
# test Stocker Class
# Demo driver.  Tickers are Bursa Malaysia listings (Yahoo Finance ".KL"
# suffix); each Stocker() call downloads 2017 price data at construction.
CIMB = "1023.KL"
cimb = Stocker(CIMB)
#
cimb.plot_stock()
cimb.plot_stock(stats = ['Daily Change', 'Volume'], plot_type='pct')
# Print the 10 largest trend changepoints, then plot them, optionally
# overlaid with Google-Trends search frequency for the given terms.
cimb.change_analysis(10)
cimb.changepoint_date_analysis()
cimb.changepoint_date_analysis(search = 'US China')
cimb.changepoint_date_analysis(search = 'Donald Trump')
# cimb.changepoint_news_analysis()
# sunway
SUNWAY = "5211.KL"
sunway = Stocker(SUNWAY)
sunway.plot_stock()
sunway.changepoint_date_analysis(search = 'US China')
# OSK
OSK = "5053.KL"
osk = Stocker(OSK)
osk.plot_stock()
# Airport
AIRPORT = "5014.KL"
airport = Stocker(AIRPORT)
airport.plot_stock()
"hafiz.hussin@terang.asia"
] | hafiz.hussin@terang.asia |
4efbe7d283a9b37489197ba6622c3df79b4585f0 | 4d10a701a6821a031a3546d94d1ce20bd0a0dccc | /functional_tests/test_simple_list_creation.py | 7bf0dc839bbc654b880a5bae930da5b88061d2a2 | [] | no_license | MagdaZawora/superlists_TDD | 202465ddb3d9563b5e5725f70cfd127ba70048ae | 28e08c1780e5d1c6d6747e111889bf945ec57a8a | refs/heads/master | 2021-09-06T07:10:19.324324 | 2018-02-03T16:10:24 | 2018-02-03T16:10:24 | 109,604,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,110 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class NewVisitorTest(FunctionalTest):
def test_can_start_a_list_for_one_user(self):
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
self.browser.get(self.live_server_url)
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very methodical)
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Use peacock feathers to make a fly')
inputbox.send_keys(Keys.ENTER)
# The page updates again, and now shows both items on her list
self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
self.wait_for_row_in_list_table('1: Buy peacock feathers')
## from ere - out of test?
# Edith wonders whether the site will remember her list. Then she sees
# that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# self.fail('Finish the test!')
# She visits that URL - her to-do list is still there.
## to here
# Satisfied, she goes back to sleep
def test_multiple_users_can_start_lists_at_different_urls(self):
# Edith starts a new to-do list
self.browser.get(self.live_server_url)
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy peacock feathers')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# She notices that her list has a unique URL
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+')
# Now a new user, Francis, comes along to the site.
## We use a new browser session to make sure that no information
## of Edith's is coming through from cookies etc
self.browser.quit()
self.browser = webdriver.Chrome()
# Francis visits the home page. There is no sign of Edith's
# list
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
# Francis starts a new list by entering a new item. He
# is less interesting than Edith...
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy milk')
inputbox.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
# Francis gets his own unique URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
# Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
# Satisfied, they both go back to sleep
| [
"magda.zawora@gmail.com"
] | magda.zawora@gmail.com |
63c7535e6182eb98cb9fd6c4f332242264692c4b | 6a3b57dd72c26e7c2e6091a20bc30706531736c3 | /test/test_dpr_retriever.py | 685ef191bd575082aa89d8c2e191848a839e7095 | [
"Apache-2.0"
] | permissive | karimjp/haystack | 3e3ef89e214064fc286804c0a5f6302c3e9a1f3c | d8154939fc2fdb436c8964de39f265e21da1fadf | refs/heads/master | 2023-01-31T12:47:28.286748 | 2020-12-11T11:10:24 | 2020-12-11T11:10:24 | 285,337,374 | 0 | 0 | Apache-2.0 | 2020-08-05T16:38:45 | 2020-08-05T15:53:33 | null | UTF-8 | Python | false | false | 6,156 | py | import pytest
import time
import numpy as np
from haystack import Document
from haystack.retriever.dense import DensePassageRetriever
from transformers import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("document_store", ["elasticsearch", "faiss", "memory"], indirect=True)
@pytest.mark.parametrize("retriever", ["dpr"], indirect=True)
@pytest.mark.parametrize("return_embedding", [True, False])
def test_dpr_retrieval(document_store, retriever, return_embedding):
    """Index five sample passages, embed them with DPR, and verify retrieval.

    Runs against every supported document store.  When ``return_embedding``
    is True, the stored 768-dim DPR vectors are spot-checked against
    precomputed reference values, guarding against silent model changes.
    """
    documents = [
        Document(
            text="""Aaron Aaron ( or ; ""Ahärôn"") is a prophet, high priest, and the brother of Moses in the Abrahamic religions. Knowledge of Aaron, along with his brother Moses, comes exclusively from religious texts, such as the Bible and Quran. The Hebrew Bible relates that, unlike Moses, who grew up in the Egyptian royal court, Aaron and his elder sister Miriam remained with their kinsmen in the eastern border-land of Egypt (Goshen). When Moses first confronted the Egyptian king about the Israelites, Aaron served as his brother's spokesman (""prophet"") to the Pharaoh. Part of the Law (Torah) that Moses received from""",
            meta={"name": "0"}
        ),
        Document(
            text="""Democratic Republic of the Congo to the south. Angola's capital, Luanda, lies on the Atlantic coast in the northwest of the country. Angola, although located in a tropical zone, has a climate that is not characterized for this region, due to the confluence of three factors: As a result, Angola's climate is characterized by two seasons: rainfall from October to April and drought, known as ""Cacimbo"", from May to August, drier, as the name implies, and with lower temperatures. On the other hand, while the coastline has high rainfall rates, decreasing from North to South and from to , with""",
        ),
        Document(
            text="""Schopenhauer, describing him as an ultimately shallow thinker: ""Schopenhauer has quite a crude mind ... where real depth starts, his comes to an end."" His friend Bertrand Russell had a low opinion on the philosopher, and attacked him in his famous ""History of Western Philosophy"" for hypocritically praising asceticism yet not acting upon it. On the opposite isle of Russell on the foundations of mathematics, the Dutch mathematician L. E. J. Brouwer incorporated the ideas of Kant and Schopenhauer in intuitionism, where mathematics is considered a purely mental activity, instead of an analytic activity wherein objective properties of reality are""",
            meta={"name": "1"}
        ),
        Document(
            text="""The Dothraki vocabulary was created by David J. Peterson well in advance of the adaptation. HBO hired the Language Creatio""",
            meta={"name": "2"}
        ),
        Document(
            text="""The title of the episode refers to the Great Sept of Baelor, the main religious building in King's Landing, where the episode's pivotal scene takes place. In the world created by George R. R. Martin""",
            meta={}
        )
    ]

    document_store.return_embedding = return_embedding
    document_store.write_documents(documents)
    document_store.update_embeddings(retriever=retriever)
    time.sleep(1)  # give the (possibly remote) backend a moment to finish indexing
    docs_with_emb = document_store.get_all_documents()

    if return_embedding is True:
        # DPR encoders emit 768-dim vectors; compare the first component of
        # each stored embedding against known reference values.
        assert (len(docs_with_emb[0].embedding) == 768)
        assert (abs(docs_with_emb[0].embedding[0] - (-0.3063)) < 0.001)
        assert (abs(docs_with_emb[1].embedding[0] - (-0.3914)) < 0.001)
        assert (abs(docs_with_emb[2].embedding[0] - (-0.2470)) < 0.001)
        assert (abs(docs_with_emb[3].embedding[0] - (-0.0802)) < 0.001)
        assert (abs(docs_with_emb[4].embedding[0] - (-0.0551)) < 0.001)
    res = retriever.retrieve(query="Which philosopher attacked Schopenhauer?")
    assert res[0].meta["name"] == "1"  # the Schopenhauer/Russell passage

    # test embedding
    if return_embedding is True:
        assert res[0].embedding is not None
    else:
        assert res[0].embedding is None
@pytest.mark.parametrize("retriever", ["dpr"], indirect=True)
@pytest.mark.parametrize("document_store", ["memory"], indirect=True)
def test_dpr_saving_and_loading(retriever, document_store):
    """Round-trip a DPR retriever through save/load; compare weights and config."""
    retriever.save("test_dpr_save")

    def sum_params(model):
        # Cheap fingerprint of a model: the sum of all parameter values.
        s = []
        for p in model.parameters():
            n = p.cpu().data.numpy()
            s.append(np.sum(n))
        return sum(s)

    original_sum_query = sum_params(retriever.query_encoder)
    original_sum_passage = sum_params(retriever.passage_encoder)
    del retriever

    loaded_retriever = DensePassageRetriever.load("test_dpr_save", document_store)

    loaded_sum_query = sum_params(loaded_retriever.query_encoder)
    loaded_sum_passage = sum_params(loaded_retriever.passage_encoder)

    # Loaded weights should match the saved ones (small float tolerance).
    assert abs(original_sum_query - loaded_sum_query) < 0.1
    assert abs(original_sum_passage - loaded_sum_passage) < 0.1

    # comparison of weights (RAM intense!)
    # for p1, p2 in zip(retriever.query_encoder.parameters(), loaded_retriever.query_encoder.parameters()):
    #     assert (p1.data.ne(p2.data).sum() == 0)
    #
    # for p1, p2 in zip(retriever.passage_encoder.parameters(), loaded_retriever.passage_encoder.parameters()):
    #     assert (p1.data.ne(p2.data).sum() == 0)

    # attributes
    assert loaded_retriever.embed_title == True
    assert loaded_retriever.batch_size == 16
    assert loaded_retriever.max_seq_len_passage == 256
    assert loaded_retriever.max_seq_len_query == 64

    # Tokenizer
    assert isinstance(loaded_retriever.passage_tokenizer, DPRContextEncoderTokenizerFast)
    assert isinstance(loaded_retriever.query_tokenizer, DPRQuestionEncoderTokenizerFast)
    assert loaded_retriever.passage_tokenizer.do_lower_case == True
    assert loaded_retriever.query_tokenizer.do_lower_case == True
    assert loaded_retriever.passage_tokenizer.vocab_size == 30522
    assert loaded_retriever.query_tokenizer.vocab_size == 30522
    assert loaded_retriever.passage_tokenizer.max_len == 512
    assert loaded_retriever.query_tokenizer.max_len == 512
| [
"noreply@github.com"
] | noreply@github.com |
8439b12a9a6954d37d74fc8c38bdef508d8b454a | 18b7bb4447c901d5f5f3b50dd61228f837aaed1e | /booking/migrations/0006_auto_20200729_1334.py | 34ab198b17bf5b6224fdb75a9f88b9ee30237345 | [] | no_license | sauravrawat710/Hotel-Booking-System | 3916697dba17867c4a75a449e428ebda43127ac1 | a1ea6f4c9c7d4751af2b9eac22632769ef81ce90 | refs/heads/master | 2022-11-26T17:47:15.633562 | 2020-08-03T11:10:02 | 2020-08-03T11:10:02 | 284,477,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # Generated by Django 3.0.8 on 2020-07-29 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Booking.no_of_guest and changes
    # Hotel.price to a PositiveIntegerField (both defaulting to 0).

    dependencies = [
        ('booking', '0005_auto_20200729_1326'),
    ]

    operations = [
        migrations.AddField(
            model_name='booking',
            name='no_of_guest',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='hotel',
            name='price',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| [
"sauravrawat710@gmail.com"
] | sauravrawat710@gmail.com |
998b3ea5732626bacdf6df8072694eda9201a22b | 6479133bb2455410f01863c79a8927b32d9750aa | /FacebookDexter/Engine/Algorithms/FacebookRecommendationEnhancer/FacebookRecommendationEnhancerBase.py | 0ac19855872439393dc2f02706994658a9bcee7d | [] | no_license | jssellars/aniket_filed | 3e46f78ad8d8670def8708fc6a502be5382c02b1 | 17b93889c6945db15ed8b57147def2ae89a07de5 | refs/heads/master | 2023-06-05T00:58:48.674151 | 2021-06-23T07:56:21 | 2021-06-23T07:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | import typing
from FacebookDexter.Engine.Algorithms.FacebookRecommendationEnhancer.FacebookRecommendation import \
FacebookRecommendation
from FacebookDexter.Engine.Algorithms.FacebookRecommendationEnhancer.FacebookRecommendationEnhancerBuilder import \
FacebookRecommendationEnhancerBuilder
from FacebookDexter.Engine.Algorithms.FacebookRecommendationEnhancer.FacebookRecommendationEnum import \
FacebookRecommendationImportanceEnum, FacebookRecommendationConfidenceEnum, FacebookRecommendationFieldsEnum
from FacebookDexter.Infrastructure.Domain.Recommendations.FacebookRecommendationBuilder import FacebookRecommendationBuilder
class FacebookRecommendationEnhancerBase(FacebookRecommendationEnhancerBuilder):
    """Converts Facebook's raw structure recommendations into Dexter recommendation dicts."""

    def __init__(self):
        super().__init__()

    def check_run_status(self, *args, **kwargs):
        # This enhancer is always eligible to run.
        return True

    def _map_importance(self, importance):
        """Translate a raw importance label ('HIGH'/'MEDIUM'/'LOW') to the enum member.

        Falsy input yields None; an unknown label raises KeyError.
        """
        if not importance:
            return None
        return {
            'HIGH': FacebookRecommendationImportanceEnum.HIGH,
            'MEDIUM': FacebookRecommendationImportanceEnum.MEDIUM,
            'LOW': FacebookRecommendationImportanceEnum.LOW,
        }[importance]

    def _map_confidence(self, confidence):
        """Translate a raw confidence label to the enum's value (not the member).

        Falsy input yields None; an unknown label raises KeyError.
        """
        if not confidence:
            return None
        return {
            'HIGH': FacebookRecommendationConfidenceEnum.HIGH.value,
            'MEDIUM': FacebookRecommendationConfidenceEnum.MEDIUM.value,
            'LOW': FacebookRecommendationConfidenceEnum.LOW.value,
        }[confidence]

    def run(self, structure_id: typing.AnyStr = None) -> typing.List[typing.Dict]:
        """Build recommendation dicts for ``structure_id``.

        Returns [] when no id is given or when anything goes wrong while
        parsing the structure's recommendation documents (best-effort).
        """
        if not structure_id:
            return []

        details = self._mongo_repository.get_structure_details(structure_id, level=self._level)
        try:
            results = []
            for raw_recommendation in details['recommendations']:
                results.append(self._build_recommendation_dict(structure_id, raw_recommendation))
        except Exception:
            # Best-effort: a malformed document yields no recommendations.
            return []
        return results

    def _build_recommendation_dict(self, structure_id, raw_recommendation):
        """Map one raw Facebook recommendation document to a Dexter recommendation dict."""
        parsed = FacebookRecommendation()
        parsed.title = raw_recommendation.get(
            FacebookRecommendationFieldsEnum.TITLE.value, None)
        parsed.message = raw_recommendation.get(
            FacebookRecommendationFieldsEnum.MESSAGE.value, None)
        parsed.importance = self._map_importance(
            raw_recommendation.get(FacebookRecommendationFieldsEnum.IMPORTANCE.value, None))
        parsed.confidence = self._map_confidence(
            raw_recommendation.get(FacebookRecommendationFieldsEnum.CONFIDENCE.value, None))
        parsed.blame_field = raw_recommendation.get(
            FacebookRecommendationFieldsEnum.BLAME_FIELD.value, None)

        builder = FacebookRecommendationBuilder(mongo_repository=self._mongo_repository,
                                                time_interval=self._time_interval)
        recommendation = builder.create_facebook_recommendation(facebook_id=structure_id,
                                                                level=self._level,
                                                                template=parsed.message,
                                                                confidence=parsed.confidence,
                                                                importance=parsed.importance,
                                                                )
        return recommendation.to_dict()
"Bogdan@filed.com"
] | Bogdan@filed.com |
c34b53a78f9de5ad48b31a02be0b21f78801fa49 | 3b65087d82450f7c1a9a97c9936c16aecaa7ca2a | /processView.py | c0e52f165d972c3c9800ed83c12717fd1e10e5b0 | [] | no_license | adrielk/lsst-optimizations | 340af80b93a6b0926f4d13ce3c3b5f9899e58300 | 94b1654aa28e5feab1d9547a14f8d6ed05a81595 | refs/heads/master | 2023-02-02T05:33:43.155664 | 2020-12-22T19:18:05 | 2020-12-22T19:18:05 | 277,023,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | #Displaying exposure using lsst pipelines
import lsst.daf.persistence as dafPersist
import lsst.afw.display as afwDisplay
butler = dafPersist.Butler(inputs='/home/adriel/DATA/rerun/processCcdOutputs')
data = butler.queryMetadata('calexp', ['visit', 'ccd'], dataId={'filter': 'HSC-R'})
print(data)
calexp = butler.get('calexp',dataId = {'filter':'HSC-R', 'visit':903334, 'ccd':23})
display = afwDisplay.getDisplay(backend = 'ds9')
display.mtv(calexp)
"""
src = butler.get('src', dataId = {'filter':'HSC-R', 'visit':903334, 'ccd':23})
print(len(src))
#print(src.getSchema())
print(src.schema.find("calib_psf_used"))
mask = calexp.getMask()
for maskName, maskBit in mask.getMaskPlaneDict().items():
print('{}: {}'.format(maskName, display.getMaskPlaneColor(maskName)))
""" | [
"theadrielkim@gmail.com"
] | theadrielkim@gmail.com |
f3cd6766d23f03656ef3274d07cce9ea1489c132 | 350ecc8259bcad075bd376423335bb41cc8a533e | /classic_strategy1.py | 50460f25736ecc5fcf66524f4c162073165d5ca1 | [] | no_license | CodedQuen/python_begin | 39da66ecc4a77b94a5afbbf0900727c8156b85e1 | 1433c319b5d85520c50aee00dd4b6f21a7e6366a | refs/heads/master | 2022-06-10T10:30:28.807874 | 2020-04-25T03:34:03 | 2020-04-25T03:34:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,457 | py | # classic_strategy.py
# Strategy pattern -- classic implementation
"""
# BEGIN CLASSIC_STRATEGY_TESTS
>>> joe = Customer('John Doe', 0) # <1>
>>> ann = Customer('Ann Smith', 1100)
>>> cart = [LineItem('banana', 4, .5), # <2>
... LineItem('apple', 10, 1.5),
... LineItem('watermellon', 5, 5.0)]
>>> Order(joe, cart, FidelityPromo()) # <3>
<Order total: 42.00 due: 42.00>
>>> Order(ann, cart, FidelityPromo()) # <4>
<Order total: 42.00 due: 39.90>
>>> banana_cart = [LineItem('banana', 30, .5), # <5>
... LineItem('apple', 10, 1.5)]
>>> Order(joe, banana_cart, BulkItemPromo()) # <6>
<Order total: 30.00 due: 28.50>
>>> long_order = [LineItem(str(item_code), 1, 1.0) # <7>
... for item_code in range(10)]
>>> Order(joe, long_order, LargeOrderPromo()) # <8>
<Order total: 10.00 due: 9.30>
>>> Order(joe, cart, LargeOrderPromo())
<Order total: 42.00 due: 42.00>
# END CLASSIC_STRATEGY_TESTS
"""
# BEGIN CLASSIC_STRATEGY
from abc import ABC, abstractmethod
from collections import namedtuple
# Customer record: display name plus accumulated fidelity points.
Customer = namedtuple('Customer', 'name fidelity')
class LineItem:
    """One cart line: a product in a given quantity at a unit price."""

    def __init__(self, product, quantity, price):
        self.product = product
        self.quantity = quantity
        self.price = price

    def total(self):
        """Subtotal for this line (unit price times quantity)."""
        return self.quantity * self.price
class Order:  # the Context
    """An order: a customer's cart plus an optional promotion Strategy."""

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)  # copy: later mutation of the caller's cart must not affect us
        self.promotion = promotion

    def total(self):
        """Undiscounted total of all cart lines (computed once, then cached).

        Bug fix: the original guarded with ``hasattr(self, '__total')`` while
        assigning ``self.__total``; inside the class body that attribute name
        is mangled to ``_Order__total``, so the guard never matched and the
        sum was recomputed on every call.  Using a single unmangled private
        name restores the intended caching.
        """
        if not hasattr(self, '_total'):
            self._total = sum(item.total() for item in self.cart)
        return self._total

    def due(self):
        """Total after applying the promotion's discount (if any)."""
        if self.promotion is None:
            discount = 0
        else:
            discount = self.promotion.discount(self)
        return self.total() - discount

    def __repr__(self):
        fmt = '<Order total: {:.2f} due: {:.2f}>'
        return fmt.format(self.total(), self.due())
class Promotion(ABC):  # the Strategy: an Abstract Base Class
    """Strategy interface: concrete promotions compute an order's discount."""

    @abstractmethod
    def discount(self, order):
        """Return discount as a positive dollar amount"""


class FidelityPromo(Promotion):  # first Concrete Strategy
    """5% discount for customers with 1000 or more fidelity points"""

    def discount(self, order):
        if order.customer.fidelity >= 1000:
            return order.total() * .05
        return 0


class BulkItemPromo(Promotion):  # second Concrete Strategy
    """10% discount for each LineItem with 20 or more units"""

    def discount(self, order):
        return sum(item.total() * .1
                   for item in order.cart
                   if item.quantity >= 20)


class LargeOrderPromo(Promotion):  # third Concrete Strategy
    """7% discount for orders with 10 or more distinct items"""

    def discount(self, order):
        distinct_count = len({item.product for item in order.cart})
        return order.total() * .07 if distinct_count >= 10 else 0
# Quick demo: two customers and a three-line cart, priced with the fidelity
# promotion (joe has 0 points, so no discount applies to the printed order).
joe = Customer('John Doe', 0)
ann = Customer('Ann Smith', 1100)
cart = [LineItem('banana', 4, .5),
        LineItem('apple', 10, 1.5),
        LineItem('watermellon', 5, 5.0)]
print(Order(joe, cart, FidelityPromo()))
# END CLASSIC_STRATEGY
| [
"noreply@github.com"
] | noreply@github.com |
f4d4477c62a9b7c90942fce44f0792f8b0c019a1 | c3a84a07539c33040376f2c1e140b1a1041f719e | /wagtail-stubs/admin/views/tags.pyi | 55025030a9dcc5f24d36277a1b5c72bd7e56c396 | [] | no_license | tm-kn/tmp-wagtail-stubs | cc1a4434b7142cb91bf42efb7daad006c4a7dbf4 | 23ac96406610b87b2e7751bc18f0ccd27f17eb44 | refs/heads/master | 2023-01-20T14:41:33.962460 | 2020-11-30T23:15:38 | 2020-11-30T23:15:38 | 317,332,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | pyi | from typing import Any, Optional
def autocomplete(request: Any, app_name: Optional[Any] = ..., model_name: Optional[Any] = ...): ...
| [
"hi@tmkn.org"
] | hi@tmkn.org |
a7d6eceeedbe70b1919e25b13485bd581c001891 | d4eb1003ff7a5bea5d9bbaebed87604c0022caff | /app/views.py | 0a5eb573e65395487efa981dd929900fc89934b7 | [
"Apache-2.0"
] | permissive | VarfoloNikita/dou-jobs-bot | b1e20ba0f2a44d1d37117ced68280bf0473f0782 | 15b263f18fbdda09628a39152b915e3b262b22e7 | refs/heads/master | 2022-12-10T12:59:20.938207 | 2019-12-01T19:54:43 | 2019-12-01T19:54:43 | 221,633,363 | 0 | 0 | Apache-2.0 | 2022-12-08T06:53:48 | 2019-11-14T07:04:24 | Python | UTF-8 | Python | false | false | 1,929 | py | import csv
import io
from typing import Dict, List, Any
from flask import make_response
from app import app, db
from app.models import UserChat, Subscription, City, Position, Stat
# One CSV row: column name -> cell value.
DataDict = Dict[str, Any]
def _make_csv(items: List[DataDict]):
    """Render ``items`` (a list of equally-keyed dicts) as a CSV download.

    Column names come from the keys of the first row; every row is expected
    to share the same keys.  An empty ``items`` list now yields an empty
    attachment instead of the IndexError the previous version raised on
    ``items[0]``.
    """
    si = io.StringIO()
    if items:
        names = list(items[0])  # header order follows the first row's key order
        cw = csv.DictWriter(si, names)
        cw.writeheader()
        cw.writerows(items)
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=export.csv"
    output.headers["Content-type"] = "text/csv"
    return output
@app.route('/')
def index():
    """Plain HTML landing page, doubling as a liveness check."""
    welcome_markup = "<h1>Welcome to our server !!</h1>"
    return welcome_markup
@app.route('/users', methods=['GET'])
def users():
    """Export every known chat/user as a CSV attachment."""
    rows = []
    for chat in UserChat.query.all():
        rows.append({
            'id': chat.id,
            'is_admin': chat.is_admin,
            'is_active': chat.is_active,
            'date_created': chat.date_created.date(),
            'user_name': chat.user_name,
        })
    return _make_csv(items=rows)
@app.route('/subscriptions', methods=['GET'])
def subscriptions():
    """Export all subscriptions joined with their city and position names."""
    joined = db.session.query(Subscription, City, Position).join(City).join(Position).all()
    rows = []
    for subscription, city, position in joined:
        rows.append({
            'id': subscription.id,
            'chat_id': subscription.chat_id,
            'city': city.name,
            'category': position.name,
            'date_created': subscription.date_created.date(),
        })
    return _make_csv(items=rows)
@app.route('/actions', methods=['GET'])
def actions():
    """Export the recorded user actions (bot statistics) as CSV."""
    rows = [
        {
            'action': stat.action,
            'chat_id': stat.chat_id,
            'date': stat.date.date(),
        }
        for stat in Stat.query.all()
    ]
    return _make_csv(items=rows)
| [
"hyzyla@MacBook-Pro-Olena.local"
] | hyzyla@MacBook-Pro-Olena.local |
e0b9a8251d2bf82dc6d619fb9b85385bc66f859b | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Shapely_numpy/source/numpy/ma/tests/test_subclassing.py | 8198c9d3593680cf24b925019336e88936468742 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 13,251 | py | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
divide, asarray, asanyarray, nomask
)
# from numpy.ma.core import (
class SubArray(np.ndarray):
    # Defines a generic np.ndarray subclass, that stores some metadata
    # in the dictionary `info`, and counts (i)add operations there.
    def __new__(cls, arr, info=None):
        # Fix: the original used a mutable default argument (``info={}``).
        # ``None`` plus an explicit copy keeps the call signature compatible
        # (positional and keyword callers unchanged) while avoiding the
        # shared-mutable-default pitfall; it also makes ``info=None`` valid.
        x = np.asanyarray(arr).view(cls)
        x.info = {} if info is None else info.copy()
        return x

    def __array_finalize__(self, obj):
        # Chain to the next class in the MRO when it provides a finalizer
        # (MaskedArray does; plain ndarray may expose a no-op or None).
        if callable(getattr(super(SubArray, self),
                            '__array_finalize__', None)):
            super(SubArray, self).__array_finalize__(obj)
        # Propagate a copy of the source array's metadata, if any.
        self.info = getattr(obj, 'info', {}).copy()
        return

    def __add__(self, other):
        result = super(SubArray, self).__add__(other)
        result.info['added'] = result.info.get('added', 0) + 1
        return result

    def __iadd__(self, other):
        result = super(SubArray, self).__iadd__(other)
        result.info['iadded'] = result.info.get('iadded', 0) + 1
        return result


subarray = SubArray
class SubMaskedArray(MaskedArray):
    """Pure MaskedArray subclass that records extra info on creation."""

    def __new__(cls, info=None, **kwargs):
        instance = super(SubMaskedArray, cls).__new__(cls, **kwargs)
        instance._optinfo['info'] = info
        return instance
class MSubArray(SubArray, MaskedArray):
    """SubArray combined with masking: carries the ``info`` dict alongside a mask."""

    def __new__(cls, data, info={}, mask=nomask):
        # Wrap the data as a SubArray first so ``info`` travels with it.
        wrapped = SubArray(data, info)
        instance = MaskedArray.__new__(cls, data=wrapped, mask=mask)
        instance.info = wrapped.info
        return instance

    def _get_series(self):
        series_view = self.view(MaskedArray)
        series_view._sharedmask = False
        return series_view

    _series = property(fget=_get_series)


msubarray = MSubArray
class MMatrix(MaskedArray, np.matrix,):
    """Masked array that is simultaneously an np.matrix (test helper)."""

    def __new__(cls, data, mask=nomask):
        matrix_data = np.matrix(data)
        return MaskedArray.__new__(cls, data=matrix_data, mask=mask)

    def __array_finalize__(self, obj):
        # Both bases need their finalizers run; neither calls the other.
        np.matrix.__array_finalize__(self, obj)
        MaskedArray.__array_finalize__(self, obj)
        return

    def _get_series(self):
        series_view = self.view(MaskedArray)
        series_view._sharedmask = False
        return series_view

    _series = property(fget=_get_series)


mmatrix = MMatrix
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
class CSAIterator(object):
    """
    Flat iterator that routes element access through the owning subclass.

    Works around ndarray.flat not propagating subclass setters/getters
    (see https://github.com/numpy/numpy/issues/4564); loosely modelled on
    MaskedIterator.
    """
    def __init__(self, a):
        self._original = a
        self._dataiter = a.view(np.ndarray).flat

    def __iter__(self):
        return self

    def _rewrap(self, raw):
        # Re-view plain results (including scalars) as the owner's subclass.
        if not isinstance(raw, np.ndarray):
            raw = raw.__array__()
        return raw.view(type(self._original))

    def __getitem__(self, indx):
        return self._rewrap(self._dataiter[indx])

    def __setitem__(self, index, value):
        self._dataiter[index] = self._original._validate_input(value)

    def __next__(self):
        return next(self._dataiter).__array__().view(type(self._original))

    next = __next__  # Python 2 iterator protocol alias
class ComplicatedSubArray(SubArray):
    # SubArray variant that customises str/repr, validates __setitem__ input
    # (so assigning masked_print_option or a plain ndarray fails), returns
    # its own class from __getitem__ even for scalars, supplies a custom
    # flat iterator, and counts multiplications via __array_wrap__.

    def __str__(self):
        return 'myprefix {0} mypostfix'.format(self.view(SubArray))

    def __repr__(self):
        # Return a repr that does not start with 'name('
        return '<{0} {1}>'.format(self.__class__.__name__, self)

    def _validate_input(self, value):
        # Reject anything that is not already a ComplicatedSubArray.
        if not isinstance(value, ComplicatedSubArray):
            raise ValueError("Can only set to MySubArray values")
        return value

    def __setitem__(self, item, value):
        # validation ensures direct assignment with ndarray or
        # masked_print_option will fail
        super(ComplicatedSubArray, self).__setitem__(
            item, self._validate_input(value))

    def __getitem__(self, item):
        # ensure getter returns our own class also for scalars
        value = super(ComplicatedSubArray, self).__getitem__(item)
        if not isinstance(value, np.ndarray):  # scalar
            value = value.__array__().view(ComplicatedSubArray)
        return value

    @property
    def flat(self):
        # Replace ndarray.flat with the subclass-aware iterator (CSAIterator).
        return CSAIterator(self)

    @flat.setter
    def flat(self, value):
        y = self.ravel()
        y[:] = value

    def __array_wrap__(self, obj, context=None):
        # Track ufunc activity: bump the 'multiplied' counter whenever this
        # array is produced by np.multiply.
        obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context)
        if context is not None and context[0] is np.multiply:
            obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1

        return obj
class TestSubclassing(TestCase):
# Test suite for masked subclasses of ndarray.
def setUp(self):
x = np.arange(5, dtype='float')
mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
# Tests whether the subclass is kept.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xsub = SubArray(x)
xmsub = masked_array(xsub, mask=m)
self.assertTrue(isinstance(xmsub, MaskedArray))
assert_equal(xmsub._data, xsub)
self.assertTrue(isinstance(xmsub._data, SubArray))
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
self.assertTrue(isinstance(mx._data, np.matrix))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
self.assertTrue(isinstance(log(mx), mmatrix))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
# Result should be a mmatrix
self.assertTrue(isinstance(add(mx, mx), mmatrix))
self.assertTrue(isinstance(add(mx, x), mmatrix))
# Result should work
assert_equal(add(mx, x), mx+x)
self.assertTrue(isinstance(add(mx, mx)._data, np.matrix))
self.assertTrue(isinstance(add.outer(mx, mx), mmatrix))
self.assertTrue(isinstance(hypot(mx, mx), mmatrix))
self.assertTrue(isinstance(hypot(mx, x), mmatrix))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
self.assertTrue(isinstance(divide(mx, mx), mmatrix))
self.assertTrue(isinstance(divide(mx, x), mmatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
x = array(arange(5), mask=[0]+[1]*4)
my = masked_array(subarray(x))
ym = msubarray(x)
#
z = (my+1)
self.assertTrue(isinstance(z, MaskedArray))
self.assertTrue(not isinstance(z, MSubArray))
self.assertTrue(isinstance(z._data, SubArray))
assert_equal(z._data.info, {})
#
z = (ym+1)
self.assertTrue(isinstance(z, MaskedArray))
self.assertTrue(isinstance(z, MSubArray))
self.assertTrue(isinstance(z._data, SubArray))
self.assertTrue(z._data.info['added'] > 0)
# Test that inplace methods from data get used (gh-4617)
ym += 1
self.assertTrue(isinstance(ym, MaskedArray))
self.assertTrue(isinstance(ym, MSubArray))
self.assertTrue(isinstance(ym._data, SubArray))
self.assertTrue(ym._data.info['iadded'] > 0)
#
ym._set_mask([1, 0, 0, 0, 1])
assert_equal(ym._mask, [1, 0, 0, 0, 1])
ym._series._set_mask([0, 0, 0, 0, 1])
assert_equal(ym._mask, [0, 0, 0, 0, 1])
#
xsub = subarray(x, info={'name':'x'})
mxsub = masked_array(xsub)
self.assertTrue(hasattr(mxsub, 'info'))
assert_equal(mxsub.info, xsub.info)
def test_subclasspreservation(self):
# Checks that masked_array(...,subok=True) preserves the class.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xinfo = [(i, j) for (i, j) in zip(x, m)]
xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
#
mxsub = masked_array(xsub, subok=False)
self.assertTrue(not isinstance(mxsub, MSubArray))
self.assertTrue(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = asarray(xsub)
self.assertTrue(not isinstance(mxsub, MSubArray))
self.assertTrue(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = masked_array(xsub, subok=True)
self.assertTrue(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, xsub._mask)
#
mxsub = asanyarray(xsub)
self.assertTrue(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, m)
def test_subclass_items(self):
"""test that getter and setter go via baseclass"""
x = np.arange(5)
xcsub = ComplicatedSubArray(x)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
# getter should return a ComplicatedSubArray, even for single item
# first check we wrote ComplicatedSubArray correctly
self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray))
self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray))
# now that it propagates inside the MaskedArray
self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray))
self.assertTrue(mxcsub[0] is masked)
self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
# also for flattened version (which goes via MaskedIterator)
self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
self.assertTrue(mxcsub[0] is masked)
self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
# setter should only work with ComplicatedSubArray input
# first check we wrote ComplicatedSubArray correctly
assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
# now that it propagates inside the MaskedArray
assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
mxcsub[1] = xcsub[4]
mxcsub[1:4] = xcsub[1:4]
# also for flattened version (which goes via MaskedIterator)
assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
mxcsub.flat[1] = xcsub[4]
mxcsub.flat[1:4] = xcsub[1:4]
def test_subclass_repr(self):
    """test that repr uses the name of the subclass
    and 'array' for np.ndarray

    A masked plain ndarray reprs as ``masked_array``; a masked SubArray
    must repr with the subclass name substituted in.
    """
    x = np.arange(5)
    mx = masked_array(x, mask=[True, False, True, False, False])
    self.assertTrue(repr(mx).startswith('masked_array'))
    xsub = SubArray(x)
    mxsub = masked_array(xsub, mask=[True, False, True, False, False])
    self.assertTrue(repr(mxsub).startswith(
        'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__)))
def test_subclass_str(self):
    """test str with subclass that has overridden str, setitem

    ComplicatedSubArray rejects the masked print placeholder in
    __setitem__, so str() must route through the subclass and keep its
    custom prefix/postfix.
    """
    # first without override
    x = np.arange(5)
    xsub = SubArray(x)
    mxsub = masked_array(xsub, mask=[True, False, True, False, False])
    self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]')

    xcsub = ComplicatedSubArray(x)
    assert_raises(ValueError, xcsub.__setitem__, 0,
                  np.ma.core.masked_print_option)
    mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
    self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
def test_pure_subclass_info_preservation(self):
    """Ufuncs and operators must carry ``_optinfo`` from the left operand."""
    # Test that ufuncs and methods conserve extra information consistently;
    # see gh-7122.
    arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
    arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
    diff1 = np.subtract(arr1, arr2)
    self.assertTrue('info' in diff1._optinfo)
    self.assertTrue(diff1._optinfo['info'] == 'test')
    diff2 = arr1 - arr2
    self.assertTrue('info' in diff2._optinfo)
    self.assertTrue(diff2._optinfo['info'] == 'test')
###############################################################################
if __name__ == '__main__':
    # Run this module's full test suite when executed directly.
    run_module_suite()
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
9971b0a591eaafdfd1e53858b527b36c943bf934 | e52aade8f4c8eba26eeb9e57f3555a7a32959bfa | /postWRF/postWRF/clicker.py | b003f800dff7387e8f886071cb1ce523677f3bbe | [] | no_license | qingu/WEM | 9769e70f7181f51f9703842c9f416b21f9090549 | 966e36ae85b9b7529b6b6d50e2a26e3cc0685e5f | refs/heads/master | 2021-01-18T19:35:12.253468 | 2014-11-05T00:50:35 | 2014-11-05T00:50:35 | 21,061,913 | 0 | 1 | null | 2014-11-05T00:50:37 | 2014-06-21T06:17:24 | Python | UTF-8 | Python | false | false | 4,990 | py | import matplotlib as M
M.use('gtkagg')
import matplotlib.pyplot as plt
import numpy as N
import collections
import pdb
# from figure import Figure
import colourtables as ct
from scales import Scales
from figure import Figure
from defaults import Defaults
class Clicker(Figure):
    """Interactive matplotlib figure for picking points, boxes and lines
    on a WRF model plot (Python 2 / gtkagg backend).

    Wires matplotlib button press/release events to handlers that draw the
    selected geometry on the axes.  Coordinates of the last press/release
    are stored on the instance as ``x0, y0`` / ``x1, y1``.
    """

    # def __init__(self,config,wrfout,ax=0):
    def __init__(self,config,wrfout,data=0,fig=0,ax=0):
        """Set up the basemap axes, optionally overlaying a data field.

        config  -- plotting configuration object
        wrfout  -- WRF output wrapper
        data    -- optional 2D field to contour (assumed reflectivity)
        fig, ax -- reuse an existing figure/axes pair when given
        """
        # import pdb; pdb.set_trace()
        self.C = config
        self.D = Defaults()
        self.W = wrfout
        if isinstance(fig,M.figure.Figure):
            # Caller supplied a figure: adopt it instead of creating one.
            self.fig = fig
            self.ax = ax
        else:
            super(Clicker,self).__init__(config,wrfout,fig=fig,ax=ax)
        self.bmap,self.x,self.y = self.basemap_setup()
        if isinstance(data,N.ndarray):
            # Lazily assuming it's reflectivity
            S = Scales('cref',2000)
            self.overlay_data(data,V=S.clvs,cmap=S.cm)

    def click_x_y(self,plotpoint=0):
        """
        plotpoint : boolean. If yes, plot point.

        Connect press/release handlers and block in plt.show() until the
        window is closed; the picked point ends up in self.x0/self.y0.
        """
        # self.plotpoint = plotpoint
        # self.fig.canvas.mpl_connect('pick_event',self.onpick)
        # self.point = M.patches.Circle((0,0),radius=1, color='g')
        # self.ax.add_patch(self.point)
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release_point)
        plt.show(self.fig)

    def draw_box(self):
        """Let the user drag out a rectangle; press sets one corner,
        release the opposite corner."""
        self.rect = M.patches.Rectangle((0,0),1,1)
        self.ax.add_patch(self.rect)
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release_box)
        plt.show(self.fig)

    def draw_line(self):
        """Let the user drag out a line segment (press = start, release = end)."""
        self.line = M.lines.Line2D((0,0),(1,1))
        self.ax.add_line(self.line)
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release_line)
        plt.show(self.fig)

    def on_press(self, event):
        """Record the data coordinates of a mouse-button press."""
        print 'press'
        self.x0 = event.xdata
        self.y0 = event.ydata

    def on_release_point(self,event):
        """Mark the pressed point with an 'x', replacing any previous marker."""
        # self.point.set_xy((self.x0,self.y0))
        if hasattr(self,'scatt'):
            if isinstance(self.scatt, M.collections.PathCollection):
                # Remove the marker from the previous click before redrawing.
                self.scatt.remove()
        self.scatt = self.ax.scatter(self.x0,self.y0,marker='x')
        self.ax.figure.canvas.draw()

    def on_release_box(self, event):
        """Finish the rectangle started in on_press and redraw the canvas."""
        print 'release'
        self.x1 = event.xdata
        self.y1 = event.ydata
        self.rect.set_width(self.x1 - self.x0)
        self.rect.set_height(self.y1 - self.y0)
        self.rect.set_xy((self.x0, self.y0))
        self.ax.figure.canvas.draw()

    def on_release_line(self, event):
        """Finish the line started in on_press and redraw the canvas."""
        print 'release'
        self.x1 = event.xdata
        self.y1 = event.ydata
        # self.rect.set_width(self.x1 - self.x0)
        # self.rect.set_height(self.y1 - self.y0)
        self.line.set_data((self.x0, self.x1),(self.y0,self.y1))
        self.ax.figure.canvas.draw()

    def onpick(self,event):
        """Pick-event handler: store the clicked data coordinates."""
        artist = event.artist
        mouseevent = event.mouseevent
        self.x = mouseevent.xdata
        self.y = mouseevent.ydata

    def overlay_data(self,data,V=0,cmap=0):
        """Filled-contour *data* onto the basemap.

        data -- 2D array (rows = y, cols = x)
        V    -- optional contour levels (ndarray); used with *cmap* when given
        """
        xlen = data.shape[1]
        ylen = data.shape[0]
        kwargs = {}
        if isinstance(V,N.ndarray):
            kwargs['levels'] = V
            kwargs['cmap'] = cmap
        # extent/picker always apply; picker=5 makes contours clickable
        # within 5 points.
        kwargs['extent'] = (0,xlen,0,ylen)
        kwargs['picker'] = 5
        self.cf = self.bmap.contourf(self.x,self.y,data,**kwargs)
        # self.fig.colorbar(cf,ax=self.ax,shrink=0.5,orientation='horizontal')
        # pdb.set_trace()
        return

    def set_box_width(self,X):
        """
        Ask user to specify a width that is normal to the
        cross-section X. The plot will show with the box displayed.
        If the user is not happy, they can try again.
        """
        plt.show(self.fig)
        user_is_happy = 0
        while not user_is_happy:
            self.km = int(raw_input("Specify line-normal width (km): "))
            # NOTE(review): int() above already raises ValueError on bad
            # input, so this isinstance guard can never fire -- confirm
            # whether a try/except around raw_input was intended.
            if not isinstance(self.km,int):
                print("Value is not integer.")
                raise Exception
            self.rect = M.patches.Rectangle((self.x0,self.y0),X.hyp_pts,X.angle)
            self.ax.add_patch(self.rect)
            self.ax.figure.canvas.draw()
            plt.show(self.fig)
            while True:
                doesitwork = raw_input("Does this work? (y/n/x): ")
                if doesitwork == 'y':
                    user_is_happy = 1
                    break
                elif doesitwork == 'n':
                    break
                elif doesitwork == 'x':
                    raise Exception
                else:
                    print("Try again.")
| [
"john.rob.lawson@googlemail.com"
] | john.rob.lawson@googlemail.com |
21dc53bea9ea91e2d721d330497d4b35fae7d207 | 5ac952d2f0754d184ffc74ee7cac947524df5b84 | /main.py | 287116f8f93df9e548413ea7b3ffb5119e01d41a | [] | no_license | Goodguyr/App1 | 8f3dc1fb0f0fce0423c7af04fb091bc38f3a4067 | 2874c163d694c701f7f0c50e6c36efe982143ea4 | refs/heads/master | 2020-09-01T12:53:09.635129 | 2019-11-24T19:10:24 | 2019-11-24T19:10:24 | 218,962,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from bottle import route, run, static_file
import json
import csv
import transport
@route("/")
def root():
    """Serve the single-page app entry point."""
    return static_file("index.html", "./")

@route("/index.js")
def index():
    """Serve the front-end script."""
    return static_file("index.js", "./")

@route("/routes/riga")
def data():
    """Return stop data for the Riga bus 13 (a-b) route."""
    return transport.getStopData("riga_bus_13_a-b")

# Listen on all interfaces, port 8080.
run(host="0.0.0.0", port=8080)
"rrigachov@gmail.com"
] | rrigachov@gmail.com |
2155e9730a7adb5594c0a9c0e5138143f70e3f0e | f8cf0f8d3ca1784b59fff380b99c4fa4da225389 | /ceshi/configparser/test.py | 4369761d2860981c381117b763a2d97b2035bd9f | [] | no_license | loveguan/mysite | 6f3c10c9bd1780a6a3c789c03ef66a10b186da92 | 7217b0e111626af0e6afddd0bc405705cf9641ca | refs/heads/master | 2021-01-01T20:49:45.367058 | 2018-02-05T14:19:42 | 2018-02-05T14:19:42 | 98,939,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: JOJ
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: zhouguanjie@qq.com
@software: JOJ
@file: test.py
@time: 2017/12/14 15:42
@desc: 建立配置文件
'''
import configparser

# Options placed in DEFAULT are inherited by every other section.
default_options = {
    'ServerAliveInterval': '45',
    'Compression': 'yes',
    'CompressionLevel': '9',
}

config = configparser.ConfigParser()
config["DEFAULT"] = default_options
config['bitbucket.org'] = {'User': 'hg'}

config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
# Writing through the section proxy mutates the parser in place.
topsecret.update({'Host Port': '50022', 'ForwardX11': 'no'})

# Added after the fact; ends up in the DEFAULT section like the rest.
config.set('DEFAULT', 'ForwardX11', 'yes')

# Persist the assembled configuration to disk.
with open('example.ini', 'w') as handle:
    config.write(handle)
"zhouguanjie@qq.com"
] | zhouguanjie@qq.com |
642f0d8d9baa31d95a3efe1954b85816c6b5d713 | 1d2635683558694d8f7d68ede9e42f0fd0a45364 | /resources/user.py | 94ef007b80933fe4941ba85c2473c1660c14883c | [] | no_license | soringherghisan/REST-API-FLASK_1 | 91d10d57584186fdc15d774d69431e476ff0fc99 | 4fd4ace5e3ee43877e4b147517a1ae7afcfc953b | refs/heads/master | 2023-04-09T03:30:21.474031 | 2021-04-22T13:14:28 | 2021-04-22T13:14:28 | 290,774,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | """
- User class
- rather than using a dict, he chose to store info in an Obj.
"""
import sqlite3
from flask_restful import Resource, reqparse
from models.UserModel import UserModel
# resource we use to sign up
class UserRegister(Resource):
    """Sign-up resource: POST creates a new user account."""

    # Declarative parsing of the request body; both fields are mandatory.
    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str, required=True,
                        help='A username is required.')
    parser.add_argument('password', type=str, required=True,
                        help='A password is required.')

    def post(self):
        """Create the user.

        Returns 400 when the username is already taken, 201 on success.
        """
        data = UserRegister.parser.parse_args()

        # Reject duplicate usernames before inserting.
        if UserModel.find_by_username(username=data['username']):
            # NOTE: response key 'messagio' kept for backward compatibility
            # with existing API clients.
            return {'messagio': 'A user with that username already exists.'}, 400

        user = UserModel(**data)  # unpack parsed fields into the model
        user.save_to_db()
        return {'messagio': 'User was created successfully.'}, 201
| [
"sorin.gherghisan@gmail.com"
] | sorin.gherghisan@gmail.com |
020965ab409130059e4fb9e1e3a6cf4d39e75232 | 864755f7d733351b205e460ec54a5f6d13050037 | /devilry/devilry_settings/views.py | 602dd5a1b02bebe44232137c80586840865b0340 | [] | permissive | aless80/devilry-django | 27fc14b7bb7356f5f9d168e435a84e7bb43a682a | 416c262e75170d5662542f15e2d7fecf5ab84730 | refs/heads/master | 2020-05-20T12:22:09.255393 | 2019-05-19T21:06:57 | 2019-05-19T21:06:57 | 185,568,847 | 0 | 0 | BSD-3-Clause | 2019-05-08T08:53:52 | 2019-05-08T08:53:51 | null | UTF-8 | Python | false | false | 753 | py | from django.http import HttpResponse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
@login_required
def missing_setting(request, setting):
    """Fallback view rendered when a required Devilry setting is unset.

    ``request`` is unused but required by the Django view signature;
    ``setting`` is the name of the missing settings key, taken from the URL.
    """
    # NOTE(review): ``setting`` comes from the URL and is interpolated into
    # HTML without escaping -- confirm the URL pattern restricts it to safe
    # characters, otherwise escape it here.
    message = """
You have been redirected to this view because your local Devilry system administrator
have not set the <strong>{setting}</strong>-setting. Please tell them to set it.""".format(setting=setting)
    return HttpResponse('<html><body>{message}</body></html>'.format(message=message))
def urlsetting_or_unsetview(settingname):
    """Return the URL configured under *settingname*.

    Falls back to the URL of the 'missing setting' view when the setting
    is absent or empty.
    """
    configured_url = getattr(settings, settingname, None)
    if configured_url:
        return configured_url
    return reverse('devilry_settings_missing_setting', args=(settingname,))
| [
"post@espenak.net"
] | post@espenak.net |
a4fb633f599399ee7f4beb516ef6db7c3aa01107 | 99be8a0186e74031e34a7cb8a12348ece12c004f | /pecan_model2/controllers/root.py | 8f6e58def822e2b9d3d6b589cb811a5c61da99de | [] | no_license | bspeng922/pecan_model2 | 1e535e7e1eee1156f3847ce12f86128efe425c21 | 86cad2b9d4076952a5832bb1693535237a24f3aa | refs/heads/master | 2020-06-18T09:00:38.574844 | 2016-12-01T01:20:52 | 2016-12-01T01:20:52 | 75,148,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | import pecan
from pecan import conf
from pecan import expose, redirect
from webob.exc import status_map
from pecan_model2.model import Session, MetaData
from pecan_model2.model.user import User
class RootController(object):
@expose(generic=True, template='json')
def index(self):
query = Session.query(User)
users = query.all()
names = [user.name for user in users]
return {"users": users}
@expose('json')
@index.when(method='POST')
def index_post(self):
username = pecan.request.POST.get('username')
password = pecan.request.POST.get('password')
email = pecan.request.POST.get('email')
user = User()
user.name = username
user.password = password
if email:
user.email = email
Session.add(user)
return {"message": "OKKKKK"}
@expose('error.html')
def error(self, status):
try:
status = int(status)
except ValueError: # pragma: no cover
status = 500
message = getattr(status_map.get(status), 'explanation', '')
return dict(status=status, message=message)
| [
"pystack@yahoo.com"
] | pystack@yahoo.com |
67cf9ed24ac96ff7d986f537d288202f3754ba59 | 1f885b2ade4eeb4e438b851c95c97a189104d75d | /FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/pggan_tf_official/config.py | 65b1947e385b2d6f84121bbed17b72927221bc3d | [
"MIT"
] | permissive | CSID-DGU/2020-1-OSSP1-ninetynine-2 | 267cc077163865f64df4832f5b33f6570a1fffb2 | b1824254882eeea0ee44e4e60896b72c51ef1d2c | refs/heads/master | 2022-11-06T19:21:47.756150 | 2020-06-29T08:27:24 | 2020-06-29T08:27:24 | 261,089,766 | 1 | 3 | NOASSERTION | 2020-06-27T20:46:59 | 2020-05-04T05:39:00 | Python | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:27f886a21c94d3f4888c7ce341c3c2f7d0002ea43a7cdc8a004ecd3de711e97d
size 12894
| [
"kluiop1@naver.com"
] | kluiop1@naver.com |
0005f97867cc1338b16608be7f85429094de61d8 | 9a3532ec5a4079b20902f61c79f6264b21341e30 | /venv/bin/easy_install | 18a08ede3cdc470487ff8471c8a572093dacbd94 | [] | no_license | fareniuk/django_webapp | e02d98d2785116e20091386d358a236b06d3edb4 | e22c0d76dd46d5da989826f43a9bc3289652c82b | refs/heads/master | 2022-05-20T03:18:02.864443 | 2019-07-07T00:41:16 | 2019-07-07T00:41:16 | 195,595,006 | 0 | 0 | null | 2022-04-22T21:51:29 | 2019-07-07T00:41:32 | Python | UTF-8 | Python | false | false | 438 | #!/home/frank/cursor/django_homework/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"fareniuk@gmail.com"
] | fareniuk@gmail.com | |
95a5575a9d0d210d3b7be2c99aeb587c9f0a2c40 | d809fcb6e854f5ff740ac36e528fe1020dda1e9b | /soal 5.py | 110fd279989ed846d39124cefe494e88dfa469c2 | [] | no_license | aowenb/Purwadhika-Excercise | befa5baa2a4c62e5d86bff6795da115f52549ae1 | d837611dc036bed4b2990d4680a6c40e692991f6 | refs/heads/main | 2023-03-25T21:44:19.511376 | 2021-03-16T13:03:29 | 2021-03-16T13:03:29 | 348,348,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | x = input('masukkan sebuah kalimat : ')
print('kalimat original : ',x)
def counter(x):
    """Count characters in *x*.

    Returns ``(n_upper, n_other)`` where ``n_upper`` is the number of
    uppercase letters and ``n_other`` counts every remaining character
    except spaces (lowercase letters, digits, punctuation).

    BUGFIX: the original removed spaces with ``low.remove(' ')`` while
    iterating over ``low``, which skips one of every pair of consecutive
    spaces and over-counts.  Spaces are now simply never counted.
    """
    n_upper = 0
    n_other = 0
    for ch in x:
        if ch.isupper():
            n_upper += 1
        elif ch != ' ':
            # spaces are excluded from the non-uppercase tally
            n_other += 1
    return n_upper, n_other
# Index 0 of counter(x) is the uppercase count, index 1 the non-uppercase count.
jumlah_kapital, jumlah_non_kapital = counter(x)
print('banyaknya Jumlah Huruf Kapital Di Kalimat adalah ', jumlah_kapital)
# BUGFIX: this line previously reused the "Kapital" label while printing
# the non-uppercase count; the label now matches the printed value.
print('banyaknya Jumlah Huruf Non Kapital Di Kalimat adalah ', jumlah_non_kapital)
| [
"noreply@github.com"
] | noreply@github.com |
6f15e3cd583c011bb562a0b53d54bb954df4bb24 | 473507d7540ad1ee5ae2670ac18ace05cd50f6fa | /Math/excel_sheet_column_number.py | 51d65533812a517cb65c1ea1328ec16f33f05486 | [] | no_license | JunctionChao/LeetCode | 4558c9d053f4a4d003903d08fade9fd93e6d9658 | 10daf38e673e69922f4be7eadf4054810da8ae13 | refs/heads/master | 2023-02-04T06:59:35.833078 | 2020-12-18T14:32:03 | 2020-12-18T14:32:03 | 322,617,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Date : 2019-09-19
# Author : Yuanbo Zhao (chaojunction@gmail.com)
def titleToNumber(s: str) -> int:
    """Convert an Excel column title (e.g. 'A', 'AA', 'ZY') to its number.

    Treats the title as a base-26 numeral whose digits run from
    'A' (1) to 'Z' (26); an empty string maps to 0.
    """
    total = 0
    for place, letter in enumerate(reversed(s)):
        digit = ord(letter) - ord('A') + 1
        total += digit * 26 ** place
    return total


if __name__ == '__main__':
    print(titleToNumber('AA'))
| [
"1429004361@qq.com"
] | 1429004361@qq.com |
bfcb215c0101de32ac489a06f3f9e7b05e29115f | 8c1885dfe0f9a39daab9f27de3a31ad1b1801b21 | /md/exercise/code/001.py | 30a727c910a6d46bc3e4b91662710c91dbd99070 | [] | no_license | liushilive/github_exercise_python | 291d54a5a6e3b456be0a9a90c68bd6e7e0ec46f8 | 15ada378bff5d7d2ca8a4f4599ebb795d41f2cce | refs/heads/master | 2022-02-25T21:06:01.224379 | 2022-02-11T06:33:23 | 2022-02-11T06:33:23 | 129,377,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | n = 0
for i in range(1, 5):
for j in range(1, 5):
for k in range(1, 5):
if (i != k) and (i != j) and (j != k):
print(i, j, k)
n += 1
print(f"共计 {n} 个")
| [
"liushilive@outlook.com"
] | liushilive@outlook.com |
8c378658557793793b299b3e94f3a31b04471d85 | 313869ac13ee6cfdaf2de5cb76adf3dec981513f | /venv/Lib/site-packages/pandas/tests/frame/test_api.py | 811391dbf141f1dda7989b8bb7ea694ea5b41081 | [] | no_license | praful-pra1/Machine-Learning-GMCA | c4a5a4fa49b17bd0461d17b40dc169970ee2acde | f93dcf2b8557be4c57ea99f4e8a3756140d2ba6c | refs/heads/master | 2022-12-25T01:09:08.318614 | 2020-10-04T04:57:45 | 2020-10-04T04:57:45 | 301,042,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,820 | py | from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
import pandas._testing as tm
class TestDataFrameMisc:
    """Assorted DataFrame API behavior tests: attribute access, iteration,
    conversion to numpy, copying semantics and metadata propagation."""

    @pytest.mark.parametrize("attr", ["index", "columns"])
    def test_copy_index_name_checking(self, float_frame, attr):
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        ind = getattr(float_frame, attr)
        ind.name = None
        cp = float_frame.copy()
        getattr(cp, attr).name = "foo"
        assert getattr(float_frame, attr).name is None

    def test_getitem_pop_assign_name(self, float_frame):
        s = float_frame["A"]
        assert s.name == "A"

        s = float_frame.pop("A")
        assert s.name == "A"

        s = float_frame.loc[:, "B"]
        assert s.name == "B"

        s2 = s.loc[:]
        assert s2.name == "B"

    def test_get_value(self, float_frame):
        for idx in float_frame.index:
            for col in float_frame.columns:
                result = float_frame._get_value(idx, col)
                expected = float_frame[col][idx]
                tm.assert_almost_equal(result, expected)

    def test_add_prefix_suffix(self, float_frame):
        with_prefix = float_frame.add_prefix("foo#")
        expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
        tm.assert_index_equal(with_prefix.columns, expected)

        with_suffix = float_frame.add_suffix("#foo")
        expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
        tm.assert_index_equal(with_suffix.columns, expected)

        with_pct_prefix = float_frame.add_prefix("%")
        expected = pd.Index([f"%{c}" for c in float_frame.columns])
        tm.assert_index_equal(with_pct_prefix.columns, expected)

        with_pct_suffix = float_frame.add_suffix("%")
        expected = pd.Index([f"{c}%" for c in float_frame.columns])
        tm.assert_index_equal(with_pct_suffix.columns, expected)

    def test_get_axis(self, float_frame):
        f = float_frame
        assert f._get_axis_number(0) == 0
        assert f._get_axis_number(1) == 1
        assert f._get_axis_number("index") == 0
        assert f._get_axis_number("rows") == 0
        assert f._get_axis_number("columns") == 1

        assert f._get_axis_name(0) == "index"
        assert f._get_axis_name(1) == "columns"
        assert f._get_axis_name("index") == "index"
        assert f._get_axis_name("rows") == "index"
        assert f._get_axis_name("columns") == "columns"

        assert f._get_axis(0) is f.index
        assert f._get_axis(1) is f.columns

        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(2)

        with pytest.raises(ValueError, match="No axis.*foo"):
            f._get_axis_name("foo")

        with pytest.raises(ValueError, match="No axis.*None"):
            f._get_axis_name(None)

        with pytest.raises(ValueError, match="No axis named"):
            f._get_axis_number(None)

    def test_keys(self, float_frame):
        getkeys = float_frame.keys
        assert getkeys() is float_frame.columns

    def test_column_contains_raises(self, float_frame):
        with pytest.raises(TypeError, match="unhashable type: 'Index'"):
            float_frame.columns in float_frame

    def test_tab_completion(self):
        # DataFrame whose columns are identifiers shall have them in __dir__.
        df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
        for key in list("ABCD"):
            assert key in dir(df)
        assert isinstance(df.__getitem__("A"), pd.Series)

        # DataFrame whose first-level columns are identifiers shall have
        # them in __dir__.
        df = pd.DataFrame(
            [list("abcd"), list("efgh")],
            columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
        )
        for key in list("ABCD"):
            assert key in dir(df)
        for key in list("EFGH"):
            assert key not in dir(df)
        assert isinstance(df.__getitem__("A"), pd.DataFrame)

    def test_not_hashable(self):
        empty_frame = DataFrame()

        df = DataFrame([1])
        msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
        with pytest.raises(TypeError, match=msg):
            hash(df)
        with pytest.raises(TypeError, match=msg):
            hash(empty_frame)

    def test_column_name_contains_unicode_surrogate(self):
        # GH 25509
        colname = "\ud83d"
        df = DataFrame({colname: []})
        # this should not crash
        assert colname not in dir(df)
        assert df.columns[0] == colname

    def test_new_empty_index(self):
        df1 = DataFrame(np.random.randn(0, 3))
        df2 = DataFrame(np.random.randn(0, 3))
        df1.index.name = "foo"
        assert df2.index.name is None

    def test_array_interface(self, float_frame):
        with np.errstate(all="ignore"):
            result = np.sqrt(float_frame)
        assert isinstance(result, type(float_frame))
        assert result.index is float_frame.index
        assert result.columns is float_frame.columns

        tm.assert_frame_equal(result, float_frame.apply(np.sqrt))

    def test_get_agg_axis(self, float_frame):
        cols = float_frame._get_agg_axis(0)
        assert cols is float_frame.columns

        idx = float_frame._get_agg_axis(1)
        assert idx is float_frame.index

        msg = r"Axis must be 0 or 1 \(got 2\)"
        with pytest.raises(ValueError, match=msg):
            float_frame._get_agg_axis(2)

    def test_nonzero(self, float_frame, float_string_frame):
        empty_frame = DataFrame()
        assert empty_frame.empty

        assert not float_frame.empty
        assert not float_string_frame.empty

        # corner case
        df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
        del df["A"]
        assert not df.empty

    def test_iteritems(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
        for k, v in df.items():
            assert isinstance(v, DataFrame._constructor_sliced)

    def test_items(self):
        # GH 17213, GH 13918
        cols = ["a", "b", "c"]
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
        for c, (k, v) in zip(cols, df.items()):
            assert c == k
            assert isinstance(v, Series)
            assert (df[k] == v).all()

    def test_iter(self, float_frame):
        assert tm.equalContents(list(float_frame), float_frame.columns)

    def test_iterrows(self, float_frame, float_string_frame):
        for k, v in float_frame.iterrows():
            exp = float_frame.loc[k]
            tm.assert_series_equal(v, exp)

        for k, v in float_string_frame.iterrows():
            exp = float_string_frame.loc[k]
            tm.assert_series_equal(v, exp)

    def test_iterrows_iso8601(self):
        # GH 19671
        s = DataFrame(
            {
                "non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
                "iso8601": date_range("2000-01-01", periods=4, freq="M"),
            }
        )
        for k, v in s.iterrows():
            exp = s.loc[k]
            tm.assert_series_equal(v, exp)

    def test_iterrows_corner(self):
        # gh-12222
        df = DataFrame(
            {
                "a": [datetime.datetime(2015, 1, 1)],
                "b": [None],
                "c": [None],
                "d": [""],
                "e": [[]],
                "f": [set()],
                "g": [{}],
            }
        )
        expected = Series(
            [datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
            index=list("abcdefg"),
            name=0,
            dtype="object",
        )
        _, result = next(df.iterrows())
        tm.assert_series_equal(result, expected)

    def test_itertuples(self, float_frame):
        for i, tup in enumerate(float_frame.itertuples()):
            s = DataFrame._constructor_sliced(tup[1:])
            s.name = tup[0]
            expected = float_frame.iloc[i, :].reset_index(drop=True)
            tm.assert_series_equal(s, expected)

        df = DataFrame(
            {"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
        )

        for tup in df.itertuples(index=False):
            assert isinstance(tup[1], int)

        df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
        dfaa = df[["a", "a"]]

        assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]

        # repr with int on 32-bit/windows
        if not (compat.is_platform_windows() or compat.is_platform_32bit()):
            assert (
                repr(list(df.itertuples(name=None)))
                == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
            )

        tup = next(df.itertuples(name="TestName"))
        assert tup._fields == ("Index", "a", "b")
        assert (tup.Index, tup.a, tup.b) == tup
        assert type(tup).__name__ == "TestName"

        df.columns = ["def", "return"]
        tup2 = next(df.itertuples(name="TestName"))
        assert tup2 == (0, 1, 4)
        assert tup2._fields == ("Index", "_1", "_2")

        df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
        # will raise SyntaxError if trying to create namedtuple
        tup3 = next(df3.itertuples())
        assert isinstance(tup3, tuple)
        if PY37:
            assert hasattr(tup3, "_fields")
        else:
            assert not hasattr(tup3, "_fields")

        # GH 28282
        df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
        result_254_columns = next(df_254_columns.itertuples(index=False))
        assert isinstance(result_254_columns, tuple)
        assert hasattr(result_254_columns, "_fields")

        df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
        result_255_columns = next(df_255_columns.itertuples(index=False))
        assert isinstance(result_255_columns, tuple)

        # Dataframes with >=255 columns will fallback to regular tuples on python < 3.7
        if PY37:
            assert hasattr(result_255_columns, "_fields")
        else:
            assert not hasattr(result_255_columns, "_fields")

    def test_sequence_like_with_categorical(self):
        # GH 7839
        # make sure can iterate
        df = DataFrame(
            {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
        )
        df["grade"] = Categorical(df["raw_grade"])

        # basic sequencing testing
        result = list(df.grade.values)
        expected = np.array(df.grade.values).tolist()
        tm.assert_almost_equal(result, expected)

        # iteration
        for t in df.itertuples(index=False):
            str(t)

        for row, s in df.iterrows():
            str(s)

        for c, col in df.items():
            str(s)

    def test_len(self, float_frame):
        assert len(float_frame) == len(float_frame.index)

    def test_values_mixed_dtypes(self, float_frame, float_string_frame):
        frame = float_frame
        arr = frame.values

        frame_cols = frame.columns
        for i, row in enumerate(arr):
            for j, value in enumerate(row):
                col = frame_cols[j]
                if np.isnan(value):
                    assert np.isnan(frame[col][i])
                else:
                    assert value == frame[col][i]

        # mixed type
        arr = float_string_frame[["foo", "A"]].values
        assert arr[0, 0] == "bar"

        df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
        arr = df.values
        assert arr[0, 0] == 1j

        # single block corner case
        arr = float_frame[["A", "B"]].values
        expected = float_frame.reindex(columns=["A", "B"]).values
        tm.assert_almost_equal(arr, expected)

    def test_to_numpy(self):
        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
        expected = np.array([[1, 3], [2, 4.5]])
        result = df.to_numpy()
        tm.assert_numpy_array_equal(result, expected)

    def test_to_numpy_dtype(self):
        df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
        expected = np.array([[1, 3], [2, 4]], dtype="int64")
        result = df.to_numpy(dtype="int64")
        tm.assert_numpy_array_equal(result, expected)

    def test_to_numpy_copy(self):
        arr = np.random.randn(4, 3)
        df = pd.DataFrame(arr)
        assert df.values.base is arr
        assert df.to_numpy(copy=False).base is arr
        assert df.to_numpy(copy=True).base is not arr

    def test_swapaxes(self):
        df = DataFrame(np.random.randn(10, 5))
        tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
        tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
        tm.assert_frame_equal(df, df.swapaxes(0, 0))
        msg = "No axis named 2 for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            df.swapaxes(2, 5)

    def test_axis_aliases(self, float_frame):
        f = float_frame

        # reg name
        expected = f.sum(axis=0)
        result = f.sum(axis="index")
        tm.assert_series_equal(result, expected)

        expected = f.sum(axis=1)
        result = f.sum(axis="columns")
        tm.assert_series_equal(result, expected)

    def test_class_axis(self):
        # GH 18147
        # no exception and no empty docstring
        assert pydoc.getdoc(DataFrame.index)
        assert pydoc.getdoc(DataFrame.columns)

    def test_more_values(self, float_string_frame):
        values = float_string_frame.values
        assert values.shape[1] == len(float_string_frame.columns)

    def test_repr_with_mi_nat(self, float_string_frame):
        df = DataFrame(
            {"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
        )
        result = repr(df)
        expected = " X\nNaT a 1\n2013-01-01 b 2"
        assert result == expected

    def test_items_names(self, float_string_frame):
        for k, v in float_string_frame.items():
            assert v.name == k

    def test_series_put_names(self, float_string_frame):
        series = float_string_frame._series
        for k, v in series.items():
            assert v.name == k

    def test_empty_nonzero(self):
        df = DataFrame([1, 2, 3])
        assert not df.empty
        df = DataFrame(index=[1], columns=[1])
        assert not df.empty
        df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
        assert df.empty
        assert df.T.empty
        empty_frames = [
            DataFrame(),
            DataFrame(index=[1]),
            DataFrame(columns=[1]),
            DataFrame({1: []}),
        ]
        for df in empty_frames:
            assert df.empty
            assert df.T.empty

    def test_with_datetimelikes(self):

        df = DataFrame(
            {
                "A": date_range("20130101", periods=10),
                "B": timedelta_range("1 day", periods=10),
            }
        )
        t = df.T

        result = t.dtypes.value_counts()
        expected = Series({np.dtype("object"): 10})
        tm.assert_series_equal(result, expected)

    def test_values(self, float_frame):
        float_frame.values[:, 0] = 5.0
        assert (float_frame.values[:, 0] == 5).all()

    def test_deepcopy(self, float_frame):
        cp = deepcopy(float_frame)
        series = cp["A"]
        series[:] = 10
        for idx, value in series.items():
            assert float_frame["A"][idx] != value

    def test_inplace_return_self(self):
        # GH 1893

        data = DataFrame(
            {"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
        )

        def _check_f(base, f):
            result = f(base)
            assert result is None

        # -----DataFrame-----

        # set_index
        f = lambda x: x.set_index("a", inplace=True)
        _check_f(data.copy(), f)

        # reset_index
        f = lambda x: x.reset_index(inplace=True)
        _check_f(data.set_index("a"), f)

        # drop_duplicates
        f = lambda x: x.drop_duplicates(inplace=True)
        _check_f(data.copy(), f)

        # sort
        f = lambda x: x.sort_values("b", inplace=True)
        _check_f(data.copy(), f)

        # sort_index
        f = lambda x: x.sort_index(inplace=True)
        _check_f(data.copy(), f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(data.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(data.copy(), f)

        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(data.copy(), f)

        # -----Series-----
        d = data.copy()["c"]

        # reset_index
        f = lambda x: x.reset_index(inplace=True, drop=True)
        _check_f(data.set_index("a")["c"], f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(d.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(d.copy(), f)

        # rename
        f = lambda x: x.rename({1: "foo"}, inplace=True)
        _check_f(d.copy(), f)

    @async_mark()
    async def test_tab_complete_warning(self, ip):
        # GH 16409
        pytest.importorskip("IPython", minversion="6.0.0")
        from IPython.core.completer import provisionalcompleter

        code = "import pandas as pd; df = pd.DataFrame()"
        await ip.run_code(code)

        # TODO: remove it when Ipython updates
        # GH 33567, jedi version raises Deprecation warning in Ipython
        import jedi

        if jedi.__version__ < "0.17.0":
            warning = tm.assert_produces_warning(None)
        else:
            warning = tm.assert_produces_warning(
                DeprecationWarning, check_stacklevel=False
            )
        with warning:
            with provisionalcompleter("ignore"):
                list(ip.Completer.completions("df.", 1))

    def test_attrs(self):
        df = pd.DataFrame({"A": [2, 3]})
        assert df.attrs == {}
        df.attrs["version"] = 1

        result = df.rename(columns=str)
        assert result.attrs == {"version": 1}

    def test_cache_on_copy(self):
        # GH 31784 _item_cache not cleared on copy causes incorrect reads after updates
        df = DataFrame({"a": [1]})

        df["x"] = [0]
        df["a"]

        df.copy()

        df["a"].values[0] = -1

        tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]}))

        df["y"] = [0]

        assert df["a"].values[0] == -1
        tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]}))

    @skip_if_no("jinja2")
    def test_constructor_expanddim_lookup(self):
        # GH#33628 accessing _constructor_expanddim should not
        # raise NotImplementedError
        df = DataFrame()

        inspect.getmembers(df)

        with pytest.raises(NotImplementedError, match="Not supported for DataFrames!"):
            df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))
| [
"prafulpar"
] | prafulpar |
1c7921e51af31f20cdd9cbfec6e84a050a21e794 | 7704614dd6513f755e3086e3becad56a9f188900 | /lab7.1.py | 26058b5e2f204bec1ad562d7d09a42f542533aaa | [] | no_license | Zettroke/python-labs-bmstu-2019-2020 | e5c45aa61ed8ca9f16f2bf8151bbfda6b530e0c0 | db30e3f6afdfdb56b0ccbf88f810a4a70a20fd24 | refs/heads/master | 2021-01-05T05:12:55.106920 | 2020-03-01T20:26:43 | 2020-03-01T20:26:43 | 240,892,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,952 | py | def test_num(n):
if len(n) == 0:
return False
negative = int(n[0] == '-' or n[0] == '+')
dot = n.find('.')
e = n.find('e')
frac_part = ''
exp_part = ''
if dot != -1:
whole_part = n[negative:dot]
elif e != -1:
whole_part = n[negative:e]
else:
whole_part = n[negative:]
if e != -1:
if dot != -1:
frac_part = n[dot + 1:e]
exponent = n[e + 1:]
if len(exponent) > 0:
exp_part = exponent[int(exponent[0] == '-' or exponent[0] == '+'):]
if e != -1 and not exp_part:
exp_part = 'wrong'
elif dot != -1:
frac_part = n[dot + 1:]
res = bool((whole_part or frac_part) and (not whole_part or whole_part.isnumeric()) and (not frac_part or frac_part.isnumeric()) and (
not exp_part or exp_part.isnumeric()))
return res
##
# Медяновский Олег ИУ7-14Б
# Ввести матрицу A(6, 6), получить матрицу B(6, 5) путем вычеркивания главной диагонали.
# Найти столбец с максимальным кол-вом положительных элементов,
# напечатать итоговую матрицу, номер столбца и кол-во элементов.
#
# Ввод размера матрицы
m = 6
mat = []
s = input('Введите размер квадратной матрицы: ')
if s.isnumeric() or (s[0] == '+' and s.isnumeric()):
m = int(s)
else:
print('Введите корректный размер квадратной матрицы')
print('Введите матрицу {}х{}:'.format(m, m))
# Ввод матрицы
while len(mat) < m:
l = input('{}: '.format(len(mat) + 1)).split()
is_num = True
for i in l:
is_num = is_num and test_num(i)
if len(l) != m or not is_num:
if not is_num:
print('Введите корректные числа!')
else:
print('Введите строку из {} элементов!'.format(m))
else:
mat.append(list(map(float, l)))
# Удаление главной диагонали
for i in range(m):
del mat[i][i]
# Подсчет полоительных элементов0
max_col = (-1, -1) # (col, number)
for col in range(m-1):
cnt = 0
for row in mat:
if row[col] > 0:
cnt += 1
if cnt > max_col[1]:
max_col = (col+1, cnt)
if max_col == (-1, -1):
print('Положительные элементы отсутствуют.')
else:
for row in mat:
for col in row:
print(col, end=' ')
print()
print('{} столбец содержит максимальное кол-во положительных элементов: {}.'
.format(*max_col))
'''
-1 -2 3 4 5 6
-7 8 -9 0 1 -2
3 4 -5 6 -7 -8
-9 -0 1 -2 -3 4
5 -6 -7 -8 9 0
-1 2 3 -4 5 -6
''' | [
"skorostnoy2000@yandex.ru"
] | skorostnoy2000@yandex.ru |
1a1cc50bf9147c51fabd346d295ef62ae3f18ec9 | 49c4e72854f9fce728825b00d9c3ecc7935d3f84 | /python3/standalone/unit-tests/my-project/myproject/test/test_called_with_patch.py | b8a4b29beb4cf8113a0ebd2021b4c8a81fb8f3d5 | [
"MIT"
] | permissive | queirozfcom/python-sandbox | 84f722e8ef54333abf3e60384a6d50826ec7650d | 2938cabd0f3d09b32e6b373f2434fd566eb97ae7 | refs/heads/master | 2023-07-06T04:51:41.000596 | 2023-06-25T19:19:02 | 2023-06-25T19:19:02 | 92,540,278 | 47 | 55 | MIT | 2023-02-15T17:18:39 | 2017-05-26T18:59:32 | Jupyter Notebook | UTF-8 | Python | false | false | 396 | py | from unittest.mock import patch
from myproject.main import function_e
def test_called_with_patch():
with patch("myproject.main.complex_function_with_params", return_value=None) as patched_function:
# function_e converts the params to upper case and
# calls another function with those
function_e("foo", "bar")
patched_function.assert_called_with("FOO", "BAR")
| [
"queirozfcom@gmail.com"
] | queirozfcom@gmail.com |
af5f43307328796d5cd12345995d18c2208c0a79 | fe6fca33a2282bf0f1c741a790dc2c0aa7600160 | /ML/LinearRegression.py | 857152d7ba2a1045e8298baf6bfa1733acc0f79e | [] | no_license | timwford/machine-learning | b94957e32f9b5a73009c6bc1a4098f2a45a5ad22 | 51db628df235bcdc613d37855661985261ade66a | refs/heads/master | 2023-01-18T20:50:41.233099 | 2020-11-27T20:07:30 | 2020-11-27T20:07:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | import numpy as np
from math import sqrt
class LinearRegression:
def __init__(self):
self._slope = 0
self._intercept = 0
self._interval = 0
def _get_mean(self, arr):
"""
Calculates the mean of the array given
:param arr: The given array
:return: Mean
"""
return np.mean(arr)
def fit(self, x, y):
"""
Fits a linear model using least squares.
:param x: The list of independent variables
:param y: The list of dependent variables
:return: bool success
"""
if len(x) != len(y):
print("Error: input list sizes must agree.")
raise AttributeError
x_mean = self._get_mean(x)
y_mean = self._get_mean(y)
top = np.dot(x - x_mean, y - y_mean)
bottom = np.sum(((x - x_mean) ** 2))
self._slope = top / bottom
self._intercept = y_mean - (self._slope * x_mean)
y_hat = self._slope * x + self._intercept
err = np.sum((y - y_hat)**2)
deviation = sqrt(1 / (len(y) - 2) * err)
self._interval = 1.96 * deviation
return True
def get_slope(self):
"""
:return: The slope of the fit line
"""
return self._slope
def get_intercept(self):
"""
:return: The intercept of the fit line.
"""
return self._intercept
def get_interval(self):
return self._interval
| [
"koppril7@gmail.com"
] | koppril7@gmail.com |
270c0eef5fa8ee1de76ff4ec9862c25ec062786c | 3cfe9dbabdac02f21e95d8fd0f0c6a4ea7180165 | /raifhack_ds/model.py | bd604c311fedbc897a7c2a87ea4377d6ac03e9f3 | [] | no_license | ldevyataykina/raifhack | 79f0140bb73829d9665bf121bf51dfbaa80fbb08 | 43a9a79ddc2741217a4103aaf3c0ac42ca915d90 | refs/heads/main | 2023-08-23T19:10:15.574210 | 2021-09-26T10:14:29 | 2021-09-26T10:14:29 | 410,026,494 | 0 | 1 | null | 2021-09-26T10:14:29 | 2021-09-24T16:10:24 | Python | UTF-8 | Python | false | false | 6,229 | py | import typing
import pickle
import pandas as pd
import numpy as np
import logging
from lightgbm import LGBMRegressor
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, OrdinalEncoder
from sklearn.exceptions import NotFittedError
from raif_hack.data_transformers import SmoothedTargetEncoding
logger = logging.getLogger(__name__)
class BenchmarkModel():
"""
Модель представляет из себя sklearn pipeline. Пошаговый алгоритм:
1) в качестве обучения выбираются все данные с price_type=0
1) все фичи делятся на три типа (numerical_features, ohe_categorical_features, ste_categorical_features):
1.1) numerical_features - применяется StandardScaler
1.2) ohe_categorical_featires - кодируются через one hot encoding
1.3) ste_categorical_features - кодируются через SmoothedTargetEncoder
2) после этого все полученные фичи конкатенируются в одно пространство фичей и подаются на вход модели Lightgbm
3) делаем предикт на данных с price_type=1, считаем среднее отклонение реальных значений от предикта. Вычитаем это отклонение на финальном шаге (чтобы сместить отклонение к 0)
:param numerical_features: list, список численных признаков из датафрейма
:param ohe_categorical_features: list, список категориальных признаков для one hot encoding
:param ste_categorical_features, list, список категориальных признаков для smoothed target encoding.
Можно кодировать сразу несколько полей (например объединять категориальные признаки)
:
"""
def __init__(self, numerical_features: typing.List[str],
ohe_categorical_features: typing.List[str],
ste_categorical_features: typing.List[typing.Union[str, typing.List[str]]],
model_params: typing.Dict[str, typing.Union[str,int,float]]):
self.num_features = numerical_features
self.ohe_cat_features = ohe_categorical_features
self.ste_cat_features = ste_categorical_features
self.preprocessor = ColumnTransformer(transformers=[
('num', StandardScaler(), self.num_features),
('ohe', OneHotEncoder(), self.ohe_cat_features),
('ste', OrdinalEncoder(handle_unknown='use_encoded_value',unknown_value=-1),
self.ste_cat_features)])
self.model = LGBMRegressor(**model_params)
self.pipeline = Pipeline(steps=[
('preprocessor', self.preprocessor),
('model', self.model)])
self._is_fitted = False
self.corr_coef = 0
def _find_corr_coefficient(self, X_manual: pd.DataFrame, y_manual: pd.Series):
"""Вычисление корректирующего коэффициента
:param X_manual: pd.DataFrame с ручными оценками
:param y_manual: pd.Series - цены ручника
"""
predictions = self.pipeline.predict(X_manual)
deviation = ((y_manual - predictions)/predictions).median()
self.corr_coef = deviation
def fit(self, X_offer: pd.DataFrame, y_offer: pd.Series,
X_manual: pd.DataFrame, y_manual: pd.Series):
"""Обучение модели.
ML модель обучается на данных по предложениям на рынке (цены из объявления)
Затем вычисляется среднее отклонение между руяными оценками и предиктами для корректировки стоимости
:param X_offer: pd.DataFrame с объявлениями
:param y_offer: pd.Series - цена предложения (в объявлениях)
:param X_manual: pd.DataFrame с ручными оценками
:param y_manual: pd.Series - цены ручника
"""
logger.info('Fit lightgbm')
self.pipeline.fit(X_offer, y_offer, model__feature_name=[f'{i}' for i in range(70)],model__categorical_feature=['67','68','69'])
logger.info('Find corr coefficient')
self._find_corr_coefficient(X_manual, y_manual)
logger.info(f'Corr coef: {self.corr_coef:.2f}')
self.__is_fitted = True
def predict(self, X: pd.DataFrame) -> np.array:
"""Предсказание модели Предсказываем преобразованный таргет, затем конвертируем в обычную цену через обратное
преобразование.
:param X: pd.DataFrame
:return: np.array, предсказания (цены на коммерческую недвижимость)
"""
if self.__is_fitted:
predictions = self.pipeline.predict(X)
corrected_price = predictions * (1 + self.corr_coef)
return corrected_price
else:
raise NotFittedError(
"This {} instance is not fitted yet! Call 'fit' with appropriate arguments before predict".format(
type(self).__name__
)
)
def save(self, path: str):
"""Сериализует модель в pickle.
:param path: str, путь до файла
"""
with open(path, "wb") as f:
pickle.dump(self, f)
@classmethod
def load(self, path: str):
"""Сериализует модель в pickle.
:param path: str, путь до файла
:return: Модель
"""
with open(path, "rb") as f:
model = pickle.load(f)
return model | [
"elena.devyataykina@devyataykina-mb.local"
] | elena.devyataykina@devyataykina-mb.local |
e311f1544fe57c14dbef2be0c389ae2ac85d18a8 | 355f9781b025ef21815d87b61276da39c89c4c46 | /globalmaptiles.py | 30b48d68927fe1d771efc1d1af1fb82fede20bf2 | [
"MIT"
] | permissive | gijs/tile-grab | 5861a7aad9671a91193c368f1bbfc2e6aeb67f00 | 6622c8f36c7c92e87df274ed41b4980727abd963 | refs/heads/master | 2021-01-14T12:02:51.567384 | 2014-04-30T09:34:56 | 2014-04-30T09:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,949 | py | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it usefull for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yeh?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
the were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually noticable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is degined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPGS:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution( zoom )
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution( zoom )
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels( mx, my, zoom)
return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds( tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return ( minLat, minLon, maxLat, maxLon )
def WorldFileParameters(self, tx, ty, zoom):
"Returns world file (affine transofrmation) parameters of the given tile"
bounds = self.TileBounds( tx, ty, zoom)
moriginx = bounds[1] + (self.Resolution(zoom) / 2)
moriginy = bounds[2] - (self.Resolution(zoom) / 2)
return (self.Resolution(zoom), 0.0, 0.0, self.Resolution(zoom) * -1, moriginx, moriginy)
def Resolution(self, zoom ):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i-1 if i!=0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom ):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
#---------------------
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize = 256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2**zoom
px = (180 + lat) / res
py = (90 + lon) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def Resolution(self, zoom ):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2**zoom
#return 180 / float( 1 << (8+zoom) )
def TileBounds(tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2**zoom
return (
tx*256*res - 180,
ty*256*res - 90,
(tx+1)*256*res - 180,
(ty+1)*256*res - 90
)
if __name__ == "__main__":
import sys, os
def Usage(s = ""):
print "Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
print
if s:
print s
print
print "This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
print "covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
print "and in the given pyramid 'zoomlevel'."
print "For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
sys.exit(1)
profile = 'mercator'
zoomlevel = None
lat, lon, latmax, lonmax = None, None, None, None
boundingbox = False
argv = sys.argv
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-profile':
i = i + 1
profile = argv[i]
if zoomlevel is None:
zoomlevel = int(argv[i])
elif lat is None:
lat = float(argv[i])
elif lon is None:
lon = float(argv[i])
elif latmax is None:
latmax = float(argv[i])
elif lonmax is None:
lonmax = float(argv[i])
else:
Usage("ERROR: Too many parameters")
i = i + 1
if profile != 'mercator':
Usage("ERROR: Sorry, given profile is not implemented yet.")
if zoomlevel == None or lat == None or lon == None:
Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")
if latmax is not None and lonmax is None:
Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")
if latmax != None and lonmax != None:
if latmax < lat:
Usage("ERROR: 'latmax' must be bigger then 'lat'")
if lonmax < lon:
Usage("ERROR: 'lonmax' must be bigger then 'lon'")
boundingbox = (lon, lat, lonmax, latmax)
tz = zoomlevel
mercator = GlobalMercator()
mx, my = mercator.LatLonToMeters( lat, lon )
print "Spherical Mercator (ESPG:900913) coordinates for lat/lon: "
print (mx, my)
tminx, tminy = mercator.MetersToTile( mx, my, tz )
if boundingbox:
mx, my = mercator.LatLonToMeters( latmax, lonmax )
print "Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: "
print (mx, my)
tmaxx, tmaxy = mercator.MetersToTile( mx, my, tz )
else:
tmaxx, tmaxy = tminx, tminy
for ty in range(tminy, tmaxy+1):
for tx in range(tminx, tmaxx+1):
tilefilename = "%s/%s/%s" % (tz, tx, ty)
print tilefilename, "( TileMapService: z / x / y )"
gx, gy = mercator.GoogleTile(tx, ty, tz)
print "\tGoogle:", gx, gy
quadkey = mercator.QuadTree(tx, ty, tz)
print "\tQuadkey:", quadkey, '(',int(quadkey, 4),')'
bounds = mercator.TileBounds( tx, ty, tz)
print
print "\tEPSG:900913 Extent: ", bounds
wgsbounds = mercator.TileLatLonBounds( tx, ty, tz)
print "\tWGS84 Extent:", wgsbounds
print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % (
bounds[0], bounds[1], bounds[2], bounds[3], "<your-raster-file-in-epsg900913.ext>", tz, tx, ty)
print
| [
"bill@geomusings.com"
] | bill@geomusings.com |
2d2d82fa1762970898dd62432f0c936f151ec6d9 | f16a2c9bff6f29715b32d79397798a03f649f25b | /blog/apps/articles/admin.py | e2c2bda0373ce7f76ddcb8e72cbb979c5cca1772 | [] | no_license | hsyao/django-blog | 2279a9ebe10837733f83127a0c21f536a1ab9ff5 | 940b04e7f35ce17cefc72ff1e2e297e07056439d | refs/heads/master | 2020-04-18T01:05:39.773176 | 2019-02-15T03:33:37 | 2019-02-15T03:33:37 | 167,105,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from django.contrib import admin
# Register your models here.
from django.contrib.admin import ModelAdmin
from articles.models import Article, Comment, Category, Tag
admin.site.site_header = "博客后台管理系统"
admin.site.site_title = "BMS"
admin.site.index_title = '欢迎使用BMS'
class ArticleInfoAdmnin(ModelAdmin):
list_per_page = 10
fields = ['author','title','desc','category','tag','article_avatar','content']
# 调整列表显示字段
list_display = ['id','title','category','clicks','create_time','update_time']
admin.site.register(Article,ArticleInfoAdmnin)
admin.site.register(Tag)
admin.site.register(Category)
admin.site.register(Comment) | [
"784078345@qq.com"
] | 784078345@qq.com |
4d2f0d2d4ca9497e547201b052c68f244f1836f0 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/units/format/__init__.py | bad82f9313aa459bb138f442ee33f3d00bf7bc45 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 1,864 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of different unit formats.
"""
# This is pretty atrocious, but it will prevent a circular import for those
# formatters that need access to the units.core module An entry for it should
# exist in sys.modules since astropy.units.core imports this module
import sys
core = sys.modules['astropy.units.core']
from .base import Base
from .generic import Generic, Unscaled
from .cds import CDS
from .console import Console
from .fits import Fits
from .latex import Latex, LatexInline
from .ogip import OGIP
from .unicode_format import Unicode
from .vounit import VOUnit
__all__ = [
'Base', 'Generic', 'CDS', 'Console', 'Fits', 'Latex', 'LatexInline',
'OGIP', 'Unicode', 'Unscaled', 'VOUnit', 'get_format']
def get_format(format=None):
"""
Get a formatter by name.
Parameters
----------
format : str or `astropy.units.format.Base` instance or subclass
The name of the format, or the format instance or subclass
itself.
Returns
-------
format : `astropy.units.format.Base` instance
The requested formatter.
"""
if isinstance(format, type) and issubclass(format, Base):
return format
elif not (isinstance(format, str) or format is None):
raise TypeError(
"Formatter must a subclass or instance of a subclass of {!r} "
"or a string giving the name of the formatter. Valid formatter "
"names are: [{}]".format(Base, ', '.join(Base.registry)))
if format is None:
format = 'generic'
format_lower = format.lower()
if format_lower in Base.registry:
return Base.registry[format_lower]
raise ValueError("Unknown format {!r}. Valid formatter names are: "
"[{}]".format(format, ', '.join(Base.registry)))
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
bb15f4a2be885270cef4e62640a269481b8a03d3 | 8da4a294cd72d36f1f890148b859eee88fe270ac | /dev/local/data/external.py | 1b49501bc88cbb32b8c1f668d3ef04f8e21d4687 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | neerajshukla1911/fastai_dev | 3bf323e80d5594fb4bc543fe73de553e036b2ec2 | 768a5dac135a0f2ea91bc645ba279d3b1c5fd649 | refs/heads/master | 2020-07-16T00:47:11.761475 | 2019-09-01T14:10:32 | 2019-09-01T14:10:32 | 205,684,686 | 0 | 0 | Apache-2.0 | 2019-09-01T14:04:13 | 2019-09-01T14:04:12 | null | UTF-8 | Python | false | false | 8,016 | py | #AUTOGENERATED! DO NOT EDIT! File to edit: dev/04_data_external.ipynb (unless otherwise specified).
__all__ = ['download_url', 'URLs', 'get_path', 'ConfigKey', 'download_data', 'untar_data']
from ..imports import *
from ..test import *
from ..core import *
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
timeout=4, retries=5):
"Download `url` to `dest` unless it exists and not `overwrite`"
if os.path.exists(dest) and not overwrite: return
s = requests.Session()
s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
u = s.get(url, stream=True, timeout=timeout)
try: file_size = int(u.headers["Content-Length"])
except: show_progress = False
with open(dest, 'wb') as f:
nbytes = 0
if show_progress:
pbar = progress_bar(range(file_size), auto_update=False, leave=False, parent=pbar)
try:
for chunk in u.iter_content(chunk_size=chunk_size):
nbytes += len(chunk)
if show_progress: pbar.update(nbytes)
f.write(chunk)
except requests.exceptions.ConnectionError as e:
fname = url.split('/')[-1]
from fastai.datasets import Config
data_dir = dest.parent
print(f'\n Download of {url} has failed after {retries} retries\n'
f' Fix the download manually:\n'
f'$ mkdir -p {data_dir}\n'
f'$ cd {data_dir}\n'
f'$ wget -c {url}\n'
f'$ tar -zxvf {fname}\n'
f' And re-run your code once the download is successful\n')
class URLs():
    "Global constants for dataset and model URLs."
    # Base locations. LOCAL_PATH is the working directory, checked first for
    # files already on disk; the S3_* prefixes group the fast.ai buckets by
    # content type (image classification, localization, NLP, COCO, models).
    LOCAL_PATH = Path.cwd()
    URL = 'http://files.fast.ai/data/examples/'
    MDL = 'http://files.fast.ai/models/'
    S3 = 'https://s3.amazonaws.com/fast-ai-'
    S3_IMAGE = f'{S3}imageclas/'
    S3_IMAGELOC = f'{S3}imagelocal/'
    S3_NLP = f'{S3}nlp/'
    S3_COCO = f'{S3}coco/'
    S3_MODEL = f'{S3}modelzoo/'

    # main datasets (mostly small samples served from files.fast.ai)
    ADULT_SAMPLE = f'{URL}adult_sample.tgz'
    BIWI_SAMPLE = f'{URL}biwi_sample.tgz'
    CIFAR = f'{URL}cifar10.tgz'
    COCO_SAMPLE = f'{S3_COCO}coco_sample.tgz'
    COCO_TINY = f'{URL}coco_tiny.tgz'
    HUMAN_NUMBERS = f'{URL}human_numbers.tgz'
    IMDB = f'{S3_NLP}imdb.tgz'
    IMDB_SAMPLE = f'{URL}imdb_sample.tgz'
    ML_SAMPLE = f'{URL}movie_lens_sample.tgz'
    MNIST_SAMPLE = f'{URL}mnist_sample.tgz'
    MNIST_TINY = f'{URL}mnist_tiny.tgz'
    MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
    PLANET_SAMPLE = f'{URL}planet_sample.tgz'
    PLANET_TINY = f'{URL}planet_tiny.tgz'
    IMAGENETTE = f'{S3_IMAGE}imagenette.tgz'
    IMAGENETTE_160 = f'{S3_IMAGE}imagenette-160.tgz'
    IMAGENETTE_320 = f'{S3_IMAGE}imagenette-320.tgz'
    IMAGEWOOF = f'{S3_IMAGE}imagewoof.tgz'
    IMAGEWOOF_160 = f'{S3_IMAGE}imagewoof-160.tgz'
    IMAGEWOOF_320 = f'{S3_IMAGE}imagewoof-320.tgz'

    # kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
    DOGS = f'{URL}dogscats.tgz'

    # image classification datasets
    CALTECH_101 = f'{S3_IMAGE}caltech_101.tgz'
    CARS = f'{S3_IMAGE}stanford-cars.tgz'
    CIFAR_100 = f'{S3_IMAGE}cifar100.tgz'
    CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
    FLOWERS = f'{S3_IMAGE}oxford-102-flowers.tgz'
    FOOD = f'{S3_IMAGE}food-101.tgz'
    MNIST = f'{S3_IMAGE}mnist_png.tgz'
    PETS = f'{S3_IMAGE}oxford-iiit-pet.tgz'

    # NLP datasets
    AG_NEWS = f'{S3_NLP}ag_news_csv.tgz'
    AMAZON_REVIEWS = f'{S3_NLP}amazon_review_full_csv.tgz'
    AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
    DBPEDIA = f'{S3_NLP}dbpedia_csv.tgz'
    MT_ENG_FRA = f'{S3_NLP}giga-fren.tgz'
    SOGOU_NEWS = f'{S3_NLP}sogou_news_csv.tgz'
    WIKITEXT = f'{S3_NLP}wikitext-103.tgz'
    WIKITEXT_TINY = f'{S3_NLP}wikitext-2.tgz'
    YAHOO_ANSWERS = f'{S3_NLP}yahoo_answers_csv.tgz'
    YELP_REVIEWS = f'{S3_NLP}yelp_review_full_csv.tgz'
    YELP_REVIEWS_POLARITY = f'{S3_NLP}yelp_review_polarity_csv.tgz'

    # Image localization datasets
    BIWI_HEAD_POSE = f"{S3_IMAGELOC}biwi_head_pose.tgz"
    CAMVID = f'{S3_IMAGELOC}camvid.tgz'
    CAMVID_TINY = f'{URL}camvid_tiny.tgz'
    LSUN_BEDROOMS = f'{S3_IMAGE}bedroom.tgz'
    PASCAL_2007 = f'{S3_IMAGELOC}pascal_2007.tgz'
    PASCAL_2012 = f'{S3_IMAGELOC}pascal_2012.tgz'

    # Pretrained models (note: the WT103 entries carry no .tgz suffix)
    OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
    WT103_FWD = f'{S3_MODEL}wt103-fwd'
    WT103_BWD = f'{S3_MODEL}wt103-bwd'
def _get_config():
    "Return the fastai config dict, (re)creating `config.yml` with defaults when needed."
    config_path = Path(os.getenv('FASTAI_HOME', '~/.fastai')).expanduser()
    config_file = config_path/'config.yml'
    if config_file.exists():
        with open(config_file, 'r') as yaml_file:
            config = yaml.safe_load(yaml_file)
        # `safe_load` returns None for an empty file, so guard before the
        # membership test ('version' in None raises TypeError).
        if config and 'version' in config and config['version'] == 1: return config
    # File inexistent, empty, or wrong version -> write out the defaults.
    config = {'data_path': str(config_path/'data'),
              'archive_path': str(config_path/'archive'),
              'model_path': str(config_path/'models'),
              'version': 1}
    # The config directory may not exist on a fresh machine; create it so the
    # write below cannot fail with FileNotFoundError.
    config_path.mkdir(parents=True, exist_ok=True)
    with open(config_file, 'w') as yaml_file:
        yaml.dump(config, yaml_file, default_flow_style=False)
    return config
class ConfigKey(Enum):
    "Which of the configured fastai directories a path lookup refers to."
    Data    = 1
    Archive = 2
    Model   = 3

def get_path(c_key=ConfigKey.Data):
    "Resolve the directory configured for `c_key` (e.g. `data_path`) as a `Path`."
    config_key = f"{c_key.name.lower()}_path"
    return Path(_get_config()[config_key])
def _url2path(url, c_key=ConfigKey.Archive):
    "Map `url` onto a local destination, preferring a file already present under the cwd."
    fname = url.split('/')[-1]
    subdir = 'models' if c_key == ConfigKey.Model else 'data'
    local_path = URLs.LOCAL_PATH/subdir/fname
    # A copy next to the working directory wins over the configured cache dir.
    return local_path if local_path.exists() else get_path(c_key)/fname
def download_data(url, fname=None, c_key=ConfigKey.Archive, force_download=False):
    "Download `url` to `fname`."
    # Falsy `fname` (None or '') falls back to the derived cache location.
    dest = Path(fname or _url2path(url, c_key=c_key))
    dest.parent.mkdir(parents=True, exist_ok=True)
    needs_fetch = force_download or not dest.exists()
    if needs_fetch:
        print(f'Downloading {url}')
        download_url(url, dest, overwrite=force_download)
    return dest
def _get_check(url):
    "Look up the recorded [size, hash] check for `url`, or None when unknown."
    checks_path = Path(__file__).parent/'checks.txt'
    with open(checks_path, 'r') as fh:
        known_checks = json.load(fh)
    return known_checks.get(url, None)
def _check_file(fname):
    "Return [size, md5-of-first-MiB] for `fname`, used to detect stale or corrupt downloads."
    size = os.path.getsize(fname)
    with open(fname, "rb") as fh:
        head = fh.read(2**20)  # only the first MiB is hashed, keeping this fast for big archives
    return [size, hashlib.md5(head).hexdigest()]
def _add_check(url, fname):
    "Internal function to update the internal check file with `url` and check on `fname`."
    checks_path = Path(__file__).parent/'checks.txt'
    with open(checks_path, 'r') as fh:
        known_checks = json.load(fh)
    known_checks[url] = _check_file(fname)
    with open(checks_path, 'w') as fh:
        json.dump(known_checks, fh, indent=2)
def untar_data(url, fname=None, dest=None, c_key=ConfigKey.Data, force_download=False):
    "Download `url` to `fname` if `dest` doesn't exist, and un-tgz to folder `dest`."
    # Default extraction folder: the cached archive path with its suffix dropped
    # (e.g. .../mnist_sample.tgz -> .../mnist_sample).
    default_dest = _url2path(url, c_key=c_key).with_suffix('')
    dest = default_dest if dest is None else Path(dest)/default_dest.name
    # The archive itself lives at `fname` (falsy fname -> derived cache path).
    fname = Path(fname or _url2path(url))
    # If a recorded checksum exists and the cached archive no longer matches it,
    # assume the upstream file changed and force a fresh download.
    if fname.exists() and _get_check(url) and _check_file(fname) != _get_check(url):
        print("A new version of this is available, downloading...")
        force_download = True
    if force_download:
        # Drop both the stale archive and any previously extracted tree.
        if fname.exists(): os.remove(fname)
        if dest.exists(): shutil.rmtree(dest)
    if not dest.exists():
        fname = download_data(url, fname=fname, c_key=c_key)
        # Post-download verification only warns; extraction proceeds regardless.
        if _get_check(url) and _check_file(fname) != _get_check(url):
            print(f"File downloaded is broken. Remove {fname} and try again.")
        # Extract into the parent so the archive's own top folder becomes `dest`.
        tarfile.open(fname, 'r:gz').extractall(dest.parent)
    return dest
"sylvain.gugger@gmail.com"
] | sylvain.gugger@gmail.com |
5b450a1b2b66c9b67d26496fb76f7af28ff62da1 | fda52a68a6799dcdd11b4938e4b4e7129ff35eb0 | /No8_atoi.py | 0cd9623eebbff7044d101de52811726212f33caa | [] | no_license | Airstudy/DailyCoding | be1c5b54564e4a75d0fe88f69c14fa070a0962c6 | ca23b169ae747b377edb47a9933dad48ae5af9f9 | refs/heads/master | 2022-12-07T14:46:32.613210 | 2020-09-09T14:20:07 | 2020-09-09T14:20:07 | 274,598,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
dst_s = 0
re_num1 = re.compile('^\s*-{0,1}\d+') # ' -234'
re_num2 = re.compile('^\s*\+{0,1}\d+') # ' +234'
num_dict = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
'5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
is_neg = False
match1 = re_num1.search(str) # 不论字符串中有几组数字,只取第一组
match2 = re_num2.search(str) # 针对+123的情况
if match1:
num_str = match1.group()
num_str = num_str.strip()
if num_str.startswith('-'):
is_neg = True
num_str = num_str[1:]
for i in range(0, len(num_str)):
dst_s = dst_s * 10 + num_dict.get(num_str[i], 0)
elif match2:
num_str = match2.group()
num_str = num_str.strip()
if num_str.startswith('+'):
num_str = num_str[1:]
for i in range(0, len(num_str)):
dst_s = dst_s * 10 + num_dict.get(num_str[i], 0)
dst_s = -dst_s if is_neg else dst_s
# 判断溢出
threshold = pow(2,31)
if dst_s >= threshold:
dst_s = threshold -1
elif dst_s < -threshold:
dst_s = -threshold
return dst_s
| [
"noreply@github.com"
] | noreply@github.com |
2889507a6b265ca236e63b711c4c4ea824e2a77d | 56a390eeb33f7f2b52b1ae2668f31c96309b58fd | /irregular_contest/CODEFES_2018_B/b.py | c7b625791db958fcdf26aba9f7f356aae6cdee03 | [] | no_license | uni745e/AtCoder_answers | e578ac3fd12536a6902a0150b48b30f661a0d539 | 5faabebc249a52ed8a41abd59fb4a3f505a696c1 | refs/heads/master | 2020-03-23T17:00:18.630413 | 2018-11-04T15:56:27 | 2018-11-04T15:56:27 | 141,837,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # coding:utf-8
import sys
from operator import itemgetter
input = sys.stdin.readline
INF = float('inf')
def inpl(): return list(map(int, input().split()))

n, x = inpl()
pairs = [inpl() for _ in range(n)]
# Stable sort on the second field; the entry with the largest second value
# ends up last.
pairs.sort(key=lambda ab: ab[1])
# The bonus x is added to the first field of that last entry.
pairs[-1][0] += x
# Total is the sum of the pairwise products.
print(sum(a * b for a, b in pairs))
| [
"uni745e@gmail.com"
] | uni745e@gmail.com |
01643797342d76c59587dbdf1b32b88f809a09da | e6c8a31020595c59d5f680eeade6dc08789eb1a1 | /oonav.py | addd5c587ce61269b1b776372833760736f20b0e | [] | no_license | nmacgreg/tankDriving | 92244a4213a5c3445026c8dd3d2dedeb1931e6d4 | c54cb6c4896fd1a5edea47d97bf17f72152c5730 | refs/heads/master | 2021-01-10T06:23:24.524666 | 2017-03-26T02:27:42 | 2017-03-26T02:27:42 | 54,276,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | #!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import logging
import sys
import time

from navControl import navControl
# Enable verbose debug logging if -v is passed as a parameter. This may require attention, within the context of a class...
if len(sys.argv) == 2 and sys.argv[1].lower() == '-v':
logging.basicConfig(level=logging.DEBUG)
# instantiate object
control=navControl()
print '\n *** Ready to run, but need 5 seconds for the IMUs compass to calibrate, despite initial calibration! *** \n'
for countdown in range (5,0,-1):
print '{0:1d}'.format(countdown)
time.sleep(1)
print '\n I am off to the races \n'
def headingTest():
initialHeading = control.getHeading()
print 'Initial heading: {0:0.2F}'.format(initialHeading)
while (True):
direction = control.adjustHeading(initialHeading)
print "Initial heading: {0:0.2F}; we should: {1} ".format(initialHeading, direction)
time.sleep(0.5)
# Let's try some driving!
# Does the compass need further calibration?
#control.turn(180)
#control.turn(180)

# "clockwise square" per the original author, but with 180-degree corners and
# only two legs this is really an out-and-back run.
# NOTE(review): range(1,3) yields counter = 1, 2 only, so the `counter == 3`
# debug branch below is dead and the 3rd/4th legs mentioned in the closing
# comment never run -- confirm whether range(1,5) was intended.
initialHeading = control.getHeading()
driveTo=initialHeading
duration=6
corners=180
for counter in range(1,3):
    print '*** Leg: {0:2d}'.format(counter)
    if counter == 3:
        control.DEBUG=True
    control.startDrive(driveTo, duration)
    control.turn(corners)
    # Advance the target heading by the turn angle, wrapping at 360.
    driveTo=driveTo+180
    if driveTo > 360:
        driveTo=driveTo-360

# I feel like the drift in navigation is catching up with me!
# The first 2 corners are fine, but the 3rd turn is pitiful, the 4th leg is cockeyed, and final turn seems incomplete
| [
"neil.macgregor@ualberta.ca"
] | neil.macgregor@ualberta.ca |
bb9fa8236399987f6814680af95a20481f9fc3d4 | 67b7e6d2c08f08403ec086c510622be48b8d26d8 | /src/test/tinc/tincrepo/mpp/gpdb/tests/queries/basic/exttab/errlog/sql/datagen_first_errors.py | d83f25a62ae3d2681a21b0d929f720f87afcdc31 | [
"Apache-2.0",
"PostgreSQL",
"LicenseRef-scancode-rsa-md4",
"OLDAP-2.8",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"Zlib",
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-mit-modification-obligations",
"OpenSSL",
"MIT",
"LicenseRef-scancode-other-copyleft",
"bzip2-1.0.6"... | permissive | sshyran/gpdb | 41012411d22b0294204dfb0fe67a1f4c8d1ecaf6 | 2d065ecdd2b5535cb42474f17a0ee6592b4e6837 | refs/heads/master | 2023-04-09T14:05:44.030212 | 2016-11-12T08:33:33 | 2016-11-12T08:34:36 | 73,544,159 | 0 | 0 | Apache-2.0 | 2023-04-04T00:30:10 | 2016-11-12T09:43:54 | PLpgSQL | UTF-8 | Python | false | false | 1,166 | py | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import sys
def main(total_rows, number_of_error_rows):
    # Emit `number_of_error_rows` malformed rows first, then well-formed
    # pipe-delimited rows ("<i>|<i>_number") for the remainder of `total_rows`.
    error_count = 0
    # NOTE(review): `error_count` is never incremented, so every malformed row
    # is literally "error_0" -- confirm whether distinct labels were intended.
    for i in xrange(number_of_error_rows):
        print "error_%s" %str(error_count)
    for i in xrange(total_rows - number_of_error_rows):
        print "%s|%s_number" %(i,i)

if __name__ == '__main__':
    # Defaults: 20 rows, none malformed; argv[1]/argv[2] override both.
    # (Passing only one argument raises IndexError on argv[2].)
    total_rows = 20
    error_rows = 0
    if len(sys.argv) > 1:
        total_rows = int(sys.argv[1])
        error_rows = int(sys.argv[2])
    main(total_rows, error_rows)
| [
"jyih@pivotal.io"
] | jyih@pivotal.io |
cefc15375b41de5ad4a360e0ff5eb458b1fa12fa | ad8566dace0e4ab4b419b1bb5bc055b095adce72 | /ics/tests/test_adjustment_history.py | f98ab601210cc551c864ed8e1fd23b386b2effaf | [] | no_license | mayanb/wafflecone | 6c844c4c908f7c9b8e41d0d42faeefbfa8b9573e | fcd45a3b745232e52af3bdffa3fab25f13c7464d | refs/heads/staging | 2022-12-14T03:34:14.618001 | 2018-09-26T21:35:52 | 2018-09-26T21:35:52 | 74,408,398 | 1 | 0 | null | 2022-12-08T00:46:05 | 2016-11-21T21:40:59 | Python | UTF-8 | Python | false | false | 6,191 | py | from ics.models import *
from django.urls import reverse
from rest_framework.test import APITestCase
from ics.tests.factories import ProcessTypeFactory, ProductTypeFactory, TaskFactory, AdjustmentFactory, ItemFactory, \
TeamFactory, IngredientFactory, TaskIngredientFactory
import datetime
import mock
from django.utils import timezone
from decimal import Decimal
class TestAdjustmentHistory(APITestCase):
    # API tests for the 'adjustment-history' endpoint: required query params,
    # adjustment rows, item summaries, and how adjustments and item summaries
    # interleave in the response.

    def setUp(self):
        # One process/product pair shared by every test; `query_params` is the
        # valid baseline query and `task` is the default creating task.
        self.process_type = ProcessTypeFactory(name='process-name', code='process-code', unit='process-unit')
        self.product_type = ProductTypeFactory(name='product-name', code='product-code')
        self.url = reverse('adjustment-history')
        self.query_params = {
            'team': self.process_type.team_created_by.id,
            'process_type': self.process_type.id,
            'product_type': self.product_type.id,
        }
        self.past_time = timezone.make_aware(datetime.datetime(2018, 1, 10), timezone.utc)
        self.task = TaskFactory(process_type=self.process_type, product_type=self.product_type)

    def test_no_items(self):
        # With no items or adjustments, the endpoint returns a single
        # item_summary row with zero created/used amounts.
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        item_summary = response.data[0]
        self.assertEqual(item_summary['type'], 'item_summary')
        self.assertEqual(item_summary['data']['created_amount'], 0)
        self.assertEqual(item_summary['data']['used_amount'], 0)

    def test_no_team_error(self):
        # Omitting 'team' yields a 400 with an explanatory message.
        query_params = {
            'process_type': self.process_type.id,
            'product_type': self.product_type.id,
        }
        response = self.client.get(self.url, query_params, format='json')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.data[0], 'Request must include "team" query param')

    def test_no_process_type_error(self):
        # Omitting 'process_type' yields a 400 with an explanatory message.
        query_params = {
            'team': self.process_type.team_created_by.id,
            'product_type': self.product_type.id
        }
        response = self.client.get(self.url, query_params, format='json')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0], 'Request must include "process_type" query param')

    def test_no_product_type_error(self):
        # Omitting 'product_type' yields a 400 with an explanatory message.
        query_params = {
            'team': self.process_type.team_created_by.id,
            'process_type': self.process_type.id,
        }
        response = self.client.get(self.url, query_params, format='json')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0], 'Request must include "product_type" query param')

    def test_adjustment(self):
        # Freeze timezone.now so the adjustment is stamped with a known date,
        # then check the adjustment row's date, amount, and explanation.
        with mock.patch('django.utils.timezone.now') as mock_now:
            mock_now.return_value = self.past_time
            adjustment = AdjustmentFactory(
                process_type=self.process_type,
                product_type=self.product_type,
                amount=37.3,
                explanation='test-adjustment'
            )
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 3)
        history = response.data[1]
        self.assertEqual(history['type'], 'adjustment')
        self.assertEqual(history['date'], self.past_time)
        self.assertEqual(history['data']['amount'], Decimal('37.300'))
        self.assertEqual(history['data']['explanation'], 'test-adjustment')

    def test_items(self):
        # Created item amounts are summed into the item_summary row.
        ItemFactory(creating_task=self.task, amount=18.2)
        ItemFactory(creating_task=self.task, amount=9.4)
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        item_summary = response.data[0]
        self.assertEqual(item_summary['type'], 'item_summary')
        self.assertEqual(item_summary['data']['created_amount'], Decimal('27.600'))
        self.assertEqual(item_summary['data']['used_amount'], 0)

    def test_partial_inputs(self):
        # An ingredient's actual_amount counts toward used_amount even when it
        # only partially consumes the created item.
        partially_used_item = ItemFactory(creating_task=self.task, amount=39.3)
        ingredient = IngredientFactory(process_type=self.process_type, product_type=self.product_type)
        task_ingredient = TaskIngredientFactory(actual_amount=7.8, ingredient=ingredient)
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        item_summary = response.data[0]
        self.assertEqual(item_summary['type'], 'item_summary')
        self.assertEqual(item_summary['data']['created_amount'], Decimal('39.300'))
        self.assertEqual(item_summary['data']['used_amount'], Decimal('7.800'))

    def test_adjustments_and_items(self):
        # Alternating items and adjustments produce interleaved rows, newest
        # first: each adjustment splits the item summaries around it.
        ItemFactory(creating_task=self.task, amount=11.1)
        AdjustmentFactory(
            process_type=self.process_type,
            product_type=self.product_type,
            amount=22.4,
        )
        ItemFactory(creating_task=self.task, amount=33.9)
        AdjustmentFactory(
            process_type=self.process_type,
            product_type=self.product_type,
            amount=44.6,
        )
        ItemFactory(creating_task=self.task, amount=55.5)
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 5)
        self.assertEqual(response.data[0]['data']['created_amount'], Decimal('55.500'))
        self.assertEqual(response.data[1]['data']['amount'], Decimal('44.600'))
        self.assertEqual(response.data[2]['data']['created_amount'], Decimal('33.900'))
        self.assertEqual(response.data[3]['data']['amount'], Decimal('22.400'))
        self.assertEqual(response.data[4]['data']['created_amount'], Decimal('11.100'))

    def test_other_items(self):
        # Items from other process/product types, trashed tasks, or other
        # teams' inventories must not leak into the history.
        wrong_process_task = TaskFactory(process_type=ProcessTypeFactory(), product_type=self.product_type)
        wrong_product_task = TaskFactory(process_type=self.process_type, product_type=ProductTypeFactory())
        deleted_task = TaskFactory(process_type=self.process_type, product_type=self.product_type, is_trashed=True)
        other_team = TeamFactory(name='other-team')
        ItemFactory(creating_task=wrong_process_task)
        ItemFactory(creating_task=wrong_product_task)
        ItemFactory(creating_task=deleted_task)
        other_team_item = ItemFactory(creating_task=self.task)
        other_team_item.team_inventory = other_team
        other_team_item.save()
        response = self.client.get(self.url, self.query_params, format='json')
        self.assertEqual(len(response.data), 1)
| [
"steven@usepolymer.com"
] | steven@usepolymer.com |
a521964d8e8464d965b9154d8ced63e65c5d7094 | d27af9d58b91b8cd998ac0eb87d980d304ff0670 | /Beginner-Contest/ABC111/ABC111_B.py | 32201fad3a3c038e022bedc7f8262a0eb6645da9 | [] | no_license | mongesan/Atcoder-m0_ngesan-py | 29dd79daab149003ffc8b6b6bad5fa2e7daa9646 | 6654af034d4ff4cece1be04c2c8b756976d99a4b | refs/heads/master | 2023-08-20T19:50:04.547025 | 2021-10-27T12:24:51 | 2021-10-27T12:24:51 | 258,486,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9 | py | #ABC111_B | [
"syun1.mongesan@gmail.com"
] | syun1.mongesan@gmail.com |
c5b55207d0682d5dfcdd7c0805bd25beaf18f333 | 21b236e6ba200124875a9ffa72e37d3c32808de4 | /project_01/project_01/urls.py | 952e33a9b8937251a0effaf83c8ab793bb1fb367 | [] | no_license | Shrisan2/University_Database | ecdb2c4fcc11edbea53ce581a7f530a1fbe39e22 | a7ef62ce373bd02466c0308ae6c965a2ca9f7b81 | refs/heads/master | 2023-07-27T12:32:06.136309 | 2021-05-04T19:47:44 | 2021-05-04T19:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | """project_01 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Route the admin site and delegate everything else to the app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('relational_database.urls')),
]
# Append the static-file patterns (django.conf.urls.static.static) so uploads
# under STATIC_ROOT are served during development.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"Shrisan2@live.com"
] | Shrisan2@live.com |
ed62a79b73da4aa69a2a575f9dffa67ad8d30e7a | ee13295fb6aeb432a713a79c3167f8fe67b7f4d8 | /images/models.py | 9bf6f87ea8aa3cdf6ee347904a02d377fb757baf | [] | no_license | jodth07/SOCH-API | 029e404c1328a92215808e627040e711498bc0fe | dd758b85d888c7898f2b4105ea0812fcd903c259 | refs/heads/master | 2021-10-09T08:47:30.657585 | 2018-12-24T18:21:02 | 2018-12-24T18:21:02 | 96,623,946 | 0 | 0 | null | 2018-11-29T01:39:05 | 2017-07-08T14:29:35 | Python | UTF-8 | Python | false | false | 917 | py | # images.models
from django.db import models
from rest_framework import serializers
from datetime import datetime
# from drf_base64.fields import Base64ImageField
class Image(models.Model):
    # A single uploaded picture with a human-readable label.
    image = models.ImageField(blank=True, null=True)  # the file itself; optional
    name = models.CharField(max_length=200)
    # NOTE(review): datetime.now yields a naive timestamp; if USE_TZ is on,
    # django.utils.timezone.now would be the aware alternative -- confirm.
    added = models.DateTimeField(default=datetime.now)

    def __str__(self):
        return f"{self.name}"
class ImageSerializer(serializers.ModelSerializer):
    # DRF serializer exposing every Image field (`exclude = ()` excludes none).
    # image = Base64ImageField(required=False)

    class Meta:
        model = Image
        exclude = ()
class Gallery(models.Model):
    # A named collection of Image rows (many-to-many, so images can appear in
    # several galleries).
    name = models.CharField(max_length=200, default="my gallery")
    images = models.ManyToManyField(Image)
    updated = models.DateField(auto_now=True)  # bumped on every save()

    def __str__(self):
        return f"{self.name}"
class GallerySerializer(serializers.ModelSerializer):
    # DRF serializer exposing every Gallery field (`exclude = ()` excludes none).
    class Meta:
        model = Gallery
        exclude = ()
"yashiro@MacBook-Pro.local"
] | yashiro@MacBook-Pro.local |
f4ca03a62d456d71512e91dac4bb948dbf26c62a | 3bd992a53dced0d9bd6be00d0d8b7d3ee04e9ac9 | /appprincipal/instituicao/migrations/0005_auto_20201209_1605.py | 5671af61cf4db6a93ac206d81f191b4f9770d1f8 | [] | no_license | Netovoski/eng_software | f8eadfe3726021d9884f2ad24f285fa7195eaa94 | 8567051aa3ff918a8fdfec3a30fab3b1ea779d15 | refs/heads/main | 2023-01-29T22:01:50.723105 | 2020-12-14T21:28:20 | 2020-12-14T21:28:20 | 316,643,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # Generated by Django 3.0.6 on 2020-12-09 19:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.6: creates the Inst_Parc model and makes
    # Inst_Val.dirigente non-nullable. Avoid hand-editing generated migrations;
    # regenerate them instead if the models change.

    dependencies = [
        ('instituicao', '0004_auto_20201209_0848'),
    ]

    operations = [
        migrations.CreateModel(
            name='Inst_Parc',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_instituicao', models.CharField(max_length=50)),
                ('endereco', models.CharField(max_length=100, verbose_name='Endereço')),
                ('cidade', models.CharField(max_length=100, verbose_name='Cidade')),
                ('estado', models.CharField(max_length=2, verbose_name='Estado')),
                ('credenciamento', models.IntegerField()),
                ('mantenedora', models.CharField(max_length=50)),
                ('descricao', models.CharField(max_length=200, null=True)),
                ('diretor', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AlterField(
            model_name='inst_val',
            name='dirigente',
            # default=1 backfills existing NULL rows once, then is discarded.
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
    ]
| [
"netovoski@gmail.com"
] | netovoski@gmail.com |
a0924adb3c7da96cb655447e56114e94b508ac22 | b68c92fe89b701297f76054b0f284df5466eb698 | /Other/Companies/Microsoft/BenchmarkMatching.py | a00910c36d86cd8e47e303768e50f53753c80cee | [] | no_license | makrandp/python-practice | 32381a8c589f9b499ab6bde8184a847b066112f8 | 60218fd79248bf8138158811e6e1b03261fb38fa | refs/heads/master | 2023-03-27T18:11:56.066535 | 2021-03-28T04:02:00 | 2021-03-28T04:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | '''
Programming challenge description:
We say a portfolio matches the benchmark when the number of shares of each asset in the portfolio matches the number of shares of each asset in the benchmark. Your question is to write a program that determines the transactions necessary to make a portfolio match a benchmark.
A portfolio is a collection of assets such as stocks and bonds. A portfolio could have 10 shares of Vodafone stock, 15 shares of Google stock and 15 shares of Microsoft bonds. A benchmark is also just a collection of assets. A benchmark could have 15 shares of Vodafone stock, 10 shares of Google stock and 15 shares of Microsoft bonds.
A transaction is when you “buy” or “sell” a particular asset of certain asset type (“stock” or “bond”). For instance, you can decide to buy 5 shares of Vodafone stock which, given the portfolio described above, would result in you having 15 shares of Vodafone stock. Correspondingly, you decide to sell 5 shares of Microsoft bonds, which would result in 10 shares of Microsoft bonds in the above portfolio.
Assumptions:
Shares are positive decimals
There will always be at least 1 asset present in the Portfolio and Benchmark
A particular asset can be bond, stock, or both. For example, 5 shares of Microsoft bonds and 10 shares of Microsoft stock can both be present in the portfolio/benchmark
The trades should be sorted in alphabetical order based on the names of the assets; if both bonds and stock are present for an asset, list bonds first
Input:
The first part of the input is the Portfolio holdings (in the format Name,AssetType,Shares where each asset is separated by ‘|’ symbol)
The second part of the input is the Benchmark holdings (in the format Name,AssetType,Shares where each asset is separated by ‘|’ symbol)
Example input: Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15
Note that the two parts are separated by the ‘:’ symbol.
Output:
The output is a list of transactions (separated by new line) in the format TransactionType,Name,AssetType,Shares. Note that the TransactionType should only be BUY or SELL.
Example output: SELL,Google,STOCK,5 BUY,Vodafone,STOCK,5
Test 1
Test Input
Download Test 1 Input
Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15
Expected Output
Download Test 1 Input
SELL,Google,STOCK,5
BUY,Vodafone,STOCK,5
Test 2
Test Input
Download Test 2 Input
Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10
Expected Output
Download Test 2 Input
SELL,Google,STOCK,5
BUY,Vodafone,BOND,10
BUY,Vodafone,STOCK,5
'''
"""
Super quick python answer based off of the information & test cases provided. Could/should heavily improve the string concatenation.
All we do is take the company name plus the shares type (bond or stock) and use that as the key, with the amount as the value. We do this for both the current portfolio and the benchmark. For example "Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10"" gives us:
currentShares = {
"Vodafone,STOCK": 10,
"Google,STOCK": 15
}
benchmarkShares = {
    "Vodafone,STOCK": 15,
    "Vodafone,BOND": 10,
    "Google,STOCK": 10
}
Then we iterate through the benchmark shares, outputting based off of the difference and removing the key from current shares. Of the remaining current shares, we simply sell them off.
"""
from typing import List
from collections import defaultdict
class Solution():
    def benchmarkMatching(self, data: str) -> List[str]:
        """
        Compute the trades needed to make the portfolio match the benchmark.

        `data` is "<portfolio>:<benchmark>" where each side is a '|'-separated
        list of "Name,AssetType,Shares" holdings. Returns the transactions as
        "BUY,Name,AssetType,Shares" / "SELL,..." strings, sorted by asset name
        with an asset's BOND trade before its STOCK trade, and prints them one
        per line (the format the problem statement requires; the original
        printed the raw Python list and returned None despite its annotation).
        """
        def parse(side: str) -> dict:
            # Aggregate shares per (name, asset_type); the same asset/type pair
            # may legitimately appear more than once on one side.
            holdings = {}
            for entry in side.split('|'):
                name, asset_type, shares = entry.split(',')
                key = (name, asset_type)
                # Shares are "positive decimals" per the spec, so parse floats.
                holdings[key] = holdings.get(key, 0.0) + float(shares)
            return holdings

        portfolio_part, benchmark_part = data.split(':')
        portfolio = parse(portfolio_part)
        benchmark = parse(benchmark_part)

        transactions = []
        for key in set(portfolio) | set(benchmark):
            diff = benchmark.get(key, 0.0) - portfolio.get(key, 0.0)
            if diff == 0:
                continue
            name, asset_type = key
            action = 'BUY' if diff > 0 else 'SELL'
            # '{:g}' drops a trailing '.0' so whole-share counts print as ints.
            transactions.append((name, asset_type,
                                 f"{action},{name},{asset_type},{abs(diff):g}"))
        # 'BOND' < 'STOCK' lexicographically, so sorting on (name, asset_type)
        # both orders by asset name and puts bonds first within an asset --
        # the original emitted ALL bonds before ALL stocks, contradicting the
        # spec's expected output. It also reused a stale string `s` when
        # flushing leftover portfolio positions, emitting wrong trades.
        transactions.sort()
        output = [line for _, _, line in transactions]
        print('\n'.join(output))
        return output
# Exercise both sample cases from the problem statement.
solver = Solution()
for sample in (
    "Vodafone,STOCK,10|Google,STOCK,15:Vodafone,STOCK,15|Vodafone,BOND,10|Google,STOCK,10",
    "Vodafone,STOCK,10|Google,STOCK,15|Microsoft,BOND,15:Vodafone,STOCK,15|Google,STOCK,10|Microsoft,BOND,15",
):
    solver.benchmarkMatching(sample)
"awalexweber99@gmail.com"
] | awalexweber99@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.