blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
def StringMerge(string):
    """Interleave the two halves of `string` (separated by '*') character by character.

    Example: "123hg*aaabb" -> "1a2a3ahbgb".

    The two halves are expected to be the same length (coderbyte contract).
    With zip(), unequal halves are truncated to the shorter one instead of
    raising IndexError as the original index loop did when the first half
    was longer.
    """
    parts = string.split("*")
    left, right = parts[0], parts[1]
    # ''.join over a generator avoids the quadratic repeated += concatenation.
    return "".join(a + b for a, b in zip(left, right))


print(StringMerge("123hg*aaabb"))
| [
"gamblecua@gmail.com"
] | gamblecua@gmail.com |
8d16a7b317c421b41cb6db551f09e5d6d244cff9 | 3d8d874ebba15fd065c0a9e74c05e8cd2a24dbe8 | /Week 6 - Joining Data with pandas/19-Concatenate and merge to find common songs.py | 9ad795f5e20ab5a06eff3519aec9c340843f3813 | [] | no_license | RomuloMileris/UCD_Professional_Certificate_in_Data_Analytics | db3e583a6e607e74f3d26b65ba0de59cff64e5a3 | a4a77df69a2440132cfa3e89c4a1674e3e02d086 | refs/heads/master | 2023-02-22T12:48:50.039440 | 2021-01-15T17:06:07 | 2021-01-15T17:06:07 | 319,717,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Concatenate the classic tables vertically
# Concatenate the classic tables vertically
# (the original repeated these two concat statements verbatim; the duplicate
# recomputed the same frames and has been removed)
classic_18_19 = pd.concat([classic_18, classic_19], ignore_index=True)

# Concatenate the pop tables vertically
pop_18_19 = pd.concat([pop_18, pop_19], ignore_index=True)

# Merge classic_18_19 with pop_18_19 on the shared track id to keep songs
# that appear on both charts
classic_pop = classic_18_19.merge(pop_18_19, on='tid')

# Using .isin(), filter classic_18_19 rows where tid is in classic_pop
popular_classic = classic_18_19[classic_18_19['tid'].isin(classic_pop['tid'])]

# Print popular chart
print(popular_classic)
"romulosmileris@gmail.com"
] | romulosmileris@gmail.com |
b22afa174867cbcdb44387342cabbb4d1d5cce42 | 77a7f05272e82024cffa7ec3bf79b5cb5f90ee3e | /job_search_webapp_project/jobsearch/scraping/dice.py | e308bc35ab1f12a91bf963fe88d208574d75ae8f | [] | no_license | fergusonsa/JobSearch_Django | 5c3ff42cf59cd9380e5b10c3a2a64582382bbb55 | 35d626555b5dad8309358e3fde3c093a7df12702 | refs/heads/master | 2021-09-26T04:42:42.989940 | 2020-09-27T15:07:00 | 2020-09-27T15:07:00 | 85,593,500 | 0 | 0 | null | 2021-09-22T17:41:15 | 2017-03-20T15:26:32 | HTML | UTF-8 | Python | false | false | 20,929 | py | # coding: utf-8
import logging
import datetime
import re
import requests
from bs4 import BeautifulSoup
import geopy.geocoders
import geopy.distance
import geopy.exc
import jobsearch.scraping
import jobsearch.models as models
NUMBER_POSTINGS_PER_REQUEST = 25
MAX_POSTINGS_RETRIEVED = 1000
logger = logging.getLogger(__name__)
def get_max_len_of_dict_vals_for_keys(this_dict, keys):
    """Return the length of the longest truthy value in ``this_dict`` among ``keys``.

    Missing keys and falsy values count as length 0; an empty ``keys``
    iterable yields 0.
    """
    # max(..., default=0) replaces the manual "append 0 when the list is
    # empty" guard of the original implementation.
    return max((len(this_dict.get(key)) if this_dict.get(key) else 0
                for key in keys), default=0)
def get_max_len_of_dict_vals_for_key_in_list_of_dicts(dict_list, keys):
    """Return the largest value-length for ``keys`` across every dict in ``dict_list``.

    Returns 0 when ``dict_list`` is empty.
    """
    # max(..., default=0) replaces the manual "append 0 when empty" guard.
    return max((get_max_len_of_dict_vals_for_keys(this_dict, keys)
                for this_dict in dict_list), default=0)
def convert_ago_to_date(ago_str):
    """Convert a relative posting-age string (e.g. "3 days ago") to a date string.

    Returns:
        'YYYY-MM-DD' for day-granularity ages (and "today"-style strings),
        'YYYY-MM-DD HH:MM' for minute/hour ages,
        'over 30 days old' for "30+ days ago",
        and the original string unchanged when it cannot be interpreted.
    """
    try:
        if ago_str.lower() in ('just posted', 'today'):
            return datetime.datetime.now().strftime('%Y-%m-%d')
        now = datetime.datetime.now()
        # Strip everything but the leading count; a trailing '+' (e.g. '30+')
        # is kept so the sentinel check below still works.
        value = re.sub('([0-9]+[+]?) (?:minute[s]?|hour[s]?|day[s]?) ago',
                       r"\1",
                       ago_str)
        if 'minute' in ago_str:
            return (now - datetime.timedelta(
                minutes=int(value))).strftime('%Y-%m-%d %H:%M')
        if 'hour' in ago_str:
            return (now - datetime.timedelta(
                hours=int(value))).strftime('%Y-%m-%d %H:%M')
        if 'today' in ago_str.lower():
            # Catches phrases like "Posted today" that the exact-match branch
            # above misses; must run before the generic 'day' branch.
            return now.strftime('%Y-%m-%d')
        if 'day' in ago_str:
            if value == '30+':
                return 'over 30 days old'
            return (now - datetime.timedelta(
                days=int(value))).strftime('%Y-%m-%d')
        # Unrecognised format: hand the string back untouched.
        return ago_str
    except Exception:
        # The original called logger.error(msg.format(...), exc), passing the
        # exception as a %-format argument with no placeholder, which made the
        # logging module raise internally. Log it properly instead.
        logger.exception('Could not convert "%s" to a date', ago_str)
        return ago_str
def parse_html_page(page_html, source, job_site_details, aliases, geo_locator, home_location, geo_locations,
                    search_terms='',
                    verbose=False):
    """Parse one HTML search-results page and persist each posting found.

    Args:
        page_html: raw HTML text of a search-results page.
        source: name of the job site the page came from (forwarded to
            save_posting_to_db).
        job_site_details: site configuration dict; its 'parseInfo' entry
            drives the scraping (parent element/criteria, per-field rules,
            and an optional 'numberJobsFound' rule), e.g.::

                'numberJobsFound': {
                    'element': 'div',
                    'criteria': {'id': 'searchCount'},
                    'regex': '^Jobs (?:[0-9,]+) to (?:[0-9,]+) of ([0-9,]+)$',
                }

        aliases: known company aliases, forwarded to save_posting_to_db.
        geo_locator, home_location, geo_locations: geocoding helper, home
            coordinates and location cache, forwarded to save_posting_to_db.
        search_terms: search-term string stored with each posting.
        verbose: when True, log each posting id as it is added.

    Returns:
        Tuple (postings_list, number of posting elements on the page,
        total number of jobs the site reports for the search, or -1 when
        the page does not expose that count).
    """
    logger.debug(('parse_html_page(page_html, job_site_details={}, # aliases={}, geo_locator, home_location, '
                  'geo_locations, search_terms={}, verbose={})').format(job_site_details, len(aliases),
                                                                        search_terms, verbose))
    soup = BeautifulSoup(page_html, 'html.parser')
    # -1 signals "site did not report a total"; overwritten below if found.
    total_number_jobs_found = -1
    num_jobs_details = job_site_details['parseInfo'].get('numberJobsFound')
    if num_jobs_details:
        number_postings_elem = soup.find(num_jobs_details['element'],
                                         num_jobs_details['criteria'])
        if number_postings_elem:
            # Pull the raw value from an attribute, the element text, or the
            # element string, in that order of preference.
            prop = num_jobs_details.get('property')
            if prop:
                value = number_postings_elem[prop]
            elif hasattr(number_postings_elem, 'text'):
                value = number_postings_elem.text
            else:
                value = number_postings_elem.string
            if num_jobs_details.get('regex'):
                value = re.sub(num_jobs_details['regex'], r"\1", value)
            # Drop thousands separators before the numeric check.
            stripped_val = value.replace(',', '')
            if stripped_val.isdigit():
                total_number_jobs_found = int(stripped_val)
            else:
                logger.info(
                    'For %s site, the numberJobsFound parsing information, "%s", appears to return a non-numeric '
                    'string "%s" '
                    % (job_site_details['netLoc'], num_jobs_details['regex'], value))
                total_number_jobs_found = 1 # Just to ensure that it is known that at least 1 job found
    items = soup.findAll(job_site_details['parseInfo']['parentElement'],
                         job_site_details['parseInfo']['parentCriteria'])
    postings_list = {}
    for it in items:
        posting_info = {'elem': it, 'searchTerms': search_terms}
        # Extract each configured field from the posting element.
        for field in job_site_details['parseInfo']['fields'].keys():
            field_info = job_site_details['parseInfo']['fields'][field]
            # logger.info('looking for field {}'.format(field))
            try:
                value = None
                elem_type = field_info['element']
                if elem_type == 'parent':
                    # 'parent' means read the field off the posting element itself.
                    elem = it
                else:
                    elem = it.find(elem_type, field_info.get('criteria'))
                prop = field_info.get('property')
                if prop and elem.has_attr(prop):
                    value = elem[prop]
                elif hasattr(elem, 'text'):
                    value = elem.text
                elif elem:
                    value = elem.string
                if field_info.get('regex'):
                    value = re.sub(field_info['regex'], r"\1", value)
                if value:
                    # Trim leading/trailing whitespace and collapse runs of spaces.
                    posting_info[field] = re.sub(r"^\s+|\s+$|\s+(?=\s)", "", value)
            except Exception as exc:
                logger.error(('Unable to parse posting {} information for item: '
                              '\n\n{} \n\nError type: {}, val: {}').format(field, it,
                                                                           type(exc), exc))
        if posting_info.get('id'):
            if posting_info.get('postedDate'):
                # Normalise relative ages ("3 days ago") to date strings.
                posting_info['postedDate'] = convert_ago_to_date(
                    posting_info['postedDate'])
            if posting_info.get('url'):
                posting_info['url'] = 'http://{}{}'.format(
                    job_site_details['netLoc'], posting_info['url'])
            if posting_info.get('elem'):
                # Rewrite relative links inside the posting to absolute URLs.
                link_elements = posting_info['elem'].findAll('a')
                for linkElem in link_elements:
                    if not linkElem['href'].startswith('http'):
                        if linkElem['href'].startswith('/'):
                            linkElem['href'] = 'http://{}{}'.format(
                                job_site_details['netLoc'], linkElem['href'])
                        else:
                            linkElem['href'] = 'http://{}/{}'.format(
                                job_site_details['netLoc'], linkElem['href'])
            if posting_info.get('locale'):
                posting_info['locale'] = posting_info['locale'].replace(' , ', ', ')
            # Only postings that are actually new (saved) are returned.
            if jobsearch.scraping.save_posting_to_db(posting_info, source, search_terms, aliases,
                                                     geo_locator, home_location, geo_locations):
                postings_list[posting_info['id']] = posting_info
                if verbose:
                    logger.info(('Adding item details for id "{}" to list with posted'
                                 ' Date {}').format(posting_info['id'],
                                                    posting_info.get('postedDate')))
        else:
            logger.info('Unknown item not being added to list')
    return postings_list, len(items), total_number_jobs_found
def sort_by_sub_dict(dictionary, sub_dict_key):
    """Return (key, value) pairs of `dictionary` ordered by value[sub_dict_key]."""
    def inner_value(item):
        return item[1][sub_dict_key]

    return sorted(dictionary.items(), key=inner_value)
def login_to_web_site(session, job_site_detail_info):
    """POST login credentials for a job site, if username/password are configured.

    Args:
        session: requests.Session used for the login POST (keeps the cookies
            for subsequent scraping requests).
        job_site_detail_info (dict): site configuration; uses 'username',
            'password', 'loginUrl' and, optionally, 'nextUrl'.
    """
    logger.debug('login_to_web_site(session, job_site_detail_info={})'.format(job_site_detail_info))
    if job_site_detail_info.get('username') and job_site_detail_info.get('password'):
        login_data = {
            'action': 'Login',
            '__email': job_site_detail_info['username'],
            '__password': job_site_detail_info['password'],
            'remember': '1',
            'hl': 'en',
        }
        # Use .get() like the credential checks above: 'nextUrl' may be absent
        # from the config, and subscripting would raise KeyError.
        if job_site_detail_info.get('nextUrl'):
            login_data['next'] = job_site_detail_info['nextUrl']
        res = session.post(job_site_detail_info['loginUrl'], data=login_data,
                           headers={"Referer": "HOMEPAGE"})
        logger.debug('session.post("{}", data={}) returns {}'.format(job_site_detail_info['loginUrl'],
                                                                     login_data, res))
    else:
        logger.debug('Username "{}" or password "{}" is not set. Not logging in to website {}. Details: {}'.format(
            job_site_detail_info.get('username'),
            job_site_detail_info.get('password'),
            job_site_detail_info.get('loginUrl'),
            job_site_detail_info))
def get_postings_from_site_for_multiple_search_terms(source,
                                                     job_site_details_info,
                                                     search_terms_list,
                                                     aliases,
                                                     geo_locator,
                                                     home_location,
                                                     geo_locations,
                                                     expected_postings_per_page=10,
                                                     max_pages=100, min_pages=4,
                                                     verbose=False):
    """Scrape job postings from one site for each search term in turn.

    Opens a single requests.Session (logging in once for https sites) and
    reuses it for every search term.
    """
    # NOTE: the .format() arguments now follow the placeholder order of the
    # message; the original passed home_location and expected_postings_per_page
    # swapped, so the debug line reported wrong values.
    logger.debug(('get_postings_from_site_for_multiple_search_terms(job_site_details_info: {}, search_terms_list: {}, '
                  '# aliases: {}, expected_postings_per_page={}, geo_locator, home_location: {}, geo_locations,'
                  'max_pages={}, min_pages={}, verbose={})').format(job_site_details_info,
                                                                    search_terms_list,
                                                                    len(aliases),
                                                                    expected_postings_per_page,
                                                                    home_location,
                                                                    max_pages,
                                                                    min_pages, verbose))
    session = requests.Session()
    if job_site_details_info['urlSchema'] == 'https':
        login_to_web_site(session, job_site_details_info)
    for search_term in search_terms_list:
        get_job_postings_from_site(
            source, job_site_details_info, search_term, aliases,
            geo_locator, home_location, geo_locations,
            expected_postings_per_page=expected_postings_per_page,
            max_pages=max_pages, min_pages=min_pages, session=session,
            verbose=verbose)
def check_for_more_postings(num_postings_on_page, expected_postings_per_page,
                            num_unique_postings_found_on_page, num_postings_site_found,
                            start_index, max_pages, min_pages, verbose=False):
    """
    Checks criteria for whether to check for more postings on the next page.

    A next page is requested only when the site claims at least one more full
    page exists AND either (a) the last page was full, yielded something new,
    and the max_pages cap is not reached, or (b) the last page was short but
    fewer than min_pages pages have been scraped so far.

    Args:
        :param num_postings_on_page: the total number of postings found on the page
        :param expected_postings_per_page: the number of postings expected to be on the page
        :param num_unique_postings_found_on_page: the number of new/unique postings found on the page
        :param num_postings_site_found: the total number of postings found on the site
        :param start_index: the starting index for the page, should be a multiple of expectedPostingsPerPage
        :param max_pages: the maximum number of pages to scrape
        :param min_pages: the minimum number of pages to scrape
        :param verbose:

    Returns:
        bool: True when the next results page should be fetched.
    """
    logger.debug(('check_for_more_postings(num_postings_on_page={}, expected_postings_per_page={}, '
                  'num_all_unique_postings_found_on_page={}, num_postings_site_found={}, '
                  'start_index={}, max_pages={}, min_pages={}, verbose={})').format(num_postings_on_page,
                                                                                   expected_postings_per_page,
                                                                                   num_unique_postings_found_on_page,
                                                                                   num_postings_site_found,
                                                                                   start_index, max_pages,
                                                                                   min_pages, verbose))
    # Does the site report at least one more full page beyond start_index?
    if start_index + expected_postings_per_page <= num_postings_site_found:
        if num_postings_on_page == expected_postings_per_page:
            # Full page: continue only if it produced something new and the
            # max_pages cap has not been reached.
            if (num_unique_postings_found_on_page > 0 and
                    start_index < expected_postings_per_page * (max_pages - 1)):
                return True
        # NOTE: this elif pairs with the "full page" test above — it runs only
        # when the page was short; scrape at least min_pages pages regardless.
        elif start_index < expected_postings_per_page * (min_pages - 1):
            return True
        if verbose:
            logger.info(
                'numPostingsOnPage ({0}) != expectedPostingsPerPage ({1}) OR numAllUniquePostingsFoundOnPage ({2}) == '
                '0 OR startIndex ({3}) < expectedPostingsPerPage ({4}) * (maxPages ({5}) -1) OR startIndex ({3}) < '
                'expectedPostingsPerPage ({4}) * (minPages ({6}) -1) '.format(
                    num_postings_on_page, expected_postings_per_page, num_unique_postings_found_on_page,
                    start_index, expected_postings_per_page, max_pages, min_pages))
        return False
    else:
        if verbose:
            logger.debug('startIndex ({}) + expectedPostingsPerPage ({}) <= numPostingsSiteFound ({}) is False'.format(
                start_index, expected_postings_per_page, num_postings_site_found))
        return False
def get_job_postings_from_site(source, job_site_details_info, search_term, aliases,
                               geo_locator, home_location, geo_locations,
                               expected_postings_per_page=10, max_pages=100,
                               min_pages=4, session=None, verbose=False):
    """Scrape and persist postings for one search term, paging until exhausted.

    Builds the site-specific search URL, parses the first results page, then
    keeps requesting subsequent pages while check_for_more_postings indicates
    the site is likely to yield new postings.
    """
    # The .format() arguments match the placeholder order (the original also
    # passed geo_locator/home_location/geo_locations as positional format
    # arguments although the message names them without placeholders).
    logger.debug(('get_job_postings_from_site(job_site_details_info={}, search_term={}, # aliases={},'
                  'geo_locator, home_location, geo_locations, '
                  'expected_postings_per_page={}, max_pages={}, '
                  'min_pages={}, session={}, verbose={}').format(job_site_details_info, search_term, len(aliases),
                                                                 expected_postings_per_page, max_pages,
                                                                 min_pages, session, verbose))
    if not session:
        session = requests.Session()
        if job_site_details_info['urlSchema'] == 'https':
            login_to_web_site(session, job_site_details_info)
    start_index = 0
    url_arguments = {'q': search_term,
                     'l': job_site_details_info['location'],
                     job_site_details_info['jobTypeKey']: 'contract',
                     'sort': 'date',
                     job_site_details_info['pageIndexKey']: 0,
                     }
    url = '{}://{}/{}'.format(job_site_details_info['urlSchema'],
                              job_site_details_info['netLoc'],
                              job_site_details_info['urlPath'])
    page = session.get(url, params=url_arguments, verify=False)
    logger.info('\n\nHere is initial URL to be "scraped": {}\n'.format(page.url))
    postings_list, num_postings_on_page, init_total_num_postings = parse_html_page(
        page.text, source, job_site_details_info, aliases, geo_locator,
        home_location, geo_locations, search_term, verbose)
    logger.info('Found {} new of {} postings of {} from url {}'.format(
        len(postings_list), num_postings_on_page,
        init_total_num_postings, page.url))
    while check_for_more_postings(num_postings_on_page, expected_postings_per_page,
                                  len(postings_list), init_total_num_postings,
                                  start_index, max_pages, min_pages, verbose):
        start_index += expected_postings_per_page
        # Sites page either by page number or by absolute result offset.
        if job_site_details_info['pageIndexType'] == 'pageCount':
            url_arguments[job_site_details_info['pageIndexKey']] += 1
        else:
            url_arguments[job_site_details_info['pageIndexKey']] = start_index
        page = session.get(url, params=url_arguments, verify=False)
        # BUG FIX: follow-up pages must pass `source` too; previously the
        # arguments were shifted one position left, so parse_html_page received
        # job_site_details_info as `source`, aliases as the site details, etc.
        postings_list, num_postings_on_page, total_number_jobs_found = parse_html_page(
            page.text, source, job_site_details_info, aliases, geo_locator,
            home_location, geo_locations, search_term, verbose)
        logger.info('Found {} new of {} postings of {} from url {}'.format(len(postings_list),
                                                                           num_postings_on_page,
                                                                           total_number_jobs_found,
                                                                           page.url))
def scrape_new_job_postings(config=None, geo_locator=None, geo_locations=None, home_location=None):
    """Scrape new Dice postings and return the number of postings saved.

    All arguments are optional and are created with defaults when omitted
    (configuration, Nominatim geocoder, empty location cache, geocoded home).

    NOTE(review): `client` is hard-coded to None below, so this function
    currently always logs a warning and returns 0 — everything after the
    early return is unreachable scaffolding for a future Dice API client.
    """
    if not config:
        config = jobsearch.scraping.get_configuration()
    if not geo_locator:
        geo_locator = geopy.geocoders.Nominatim(user_agent="JobSearch")
    if not home_location:
        # Get coordinates for home
        home_location_str = config.get('home_location')
        home_location = jobsearch.scraping.get_geo_location(geo_locator, home_location_str)
    if not geo_locations:
        geo_locations = {} # Cache of geo locations, so do not have to get the same location multiple times
    search_terms_list = ['java', 'devops', 'python', ]
    # Placeholder: no Dice API client has been implemented yet.
    client = None
    if not client:
        logger.warning('Dice posting retrieval not implemented yet!')
        return 0
    # --- unreachable until a real client is supplied ---
    # Timestamp used to count only rows inserted during this run.
    inserted_timestamp = datetime.datetime.now()
    for search_term in search_terms_list:
        start_index = 0
        get_more_postings = True
        while get_more_postings:
            get_more_postings = False
            params = {
                'q': search_term,
                'jt': 'contract',
                'l': "ottawa,ontario,canada",
                'userip': "1.2.3.4",
                'useragent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2)",
                'start': start_index,
                'limit': NUMBER_POSTINGS_PER_REQUEST,
                'co': 'ca',
                'sort': 'date'
            }
            logger.debug("Getting postings for {} starting index {}".format(search_term, start_index))
            search_response = client.search(**params)
            logger.debug(search_response)
            results_postings = search_response.get('results')
            if results_postings:
                aliases = models.CompanyAliases.objects.all()
                for posting in results_postings:
                    if jobsearch.scraping.save_posting_to_db(posting, 'dice', search_term, aliases,
                                                             geo_locator, home_location, geo_locations):
                        # If we saved at least 1 posting, then we can try getting more postings from the source!
                        get_more_postings = True
                start_index += len(results_postings)
            if not results_postings:
                logger.debug('No postings returned from indeed api call, so not trying to get any more!')
                break
            if start_index > MAX_POSTINGS_RETRIEVED:
                logger.debug('Already retrieved max number, {}, of postings, so not trying to get any more!'.format(
                    MAX_POSTINGS_RETRIEVED))
                break
    # Count rows inserted since this run started (postings, aliases, recruiters).
    num_new_postings = models.JobPostings.objects.filter(inserted_date__gte=inserted_timestamp).count()
    num_saved_aliases = models.CompanyAliases.objects.filter(inserted_date__gte=inserted_timestamp).count()
    num_saved_recruiters = models.RecruitingCompanies.objects.filter(date_inserted__gte=inserted_timestamp).count()
    logger.debug('# new postings from Dice saved: {} # aliases: {} # recruiters: {} '.format(num_new_postings,
                                                                                             num_saved_aliases,
                                                                                             num_saved_recruiters))
    return num_new_postings
| [
"fergusonsa@yahoo.com"
] | fergusonsa@yahoo.com |
883ff69f8f33ab9939a29caa2769bdfcffbdd30c | d6ce7d815af09eea09d8bc2c6f3aaa1b341270cc | /ros_ws/devel/lib/python3/dist-packages/cozmo_rc/srv/__init__.py | 56810697b51cdf3f2704bf4724155ca111994cdd | [] | no_license | danbrick92/cozmoRos | 7e47569e6d9cdd56c84b6cffb5b1fe46453f4b48 | f0345c70f58525d3cbd4227e109b468fa4a07e15 | refs/heads/main | 2023-09-03T21:30:52.839599 | 2021-11-18T01:33:15 | 2021-11-18T01:33:15 | 423,007,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | from ._light_req import *
from ._speaker_req import *
| [
"danbrickner@hotmail.com"
] | danbrickner@hotmail.com |
b3698f59655330a8fa5ab0c4d49985791d562870 | f284021b02f6331888b6d41cfc34d555367b3797 | /bin/easy_install | b9baf79171c742180e9e609b8b8fc87a1bd06354 | [] | no_license | Hubert51/Web_Django | aa8aa771de3085d7bff2fd2b64e8de131b9af537 | f48ad6260291311262a95f71ceda354990518dfc | refs/heads/master | 2020-01-23T21:38:44.507734 | 2016-11-29T02:17:52 | 2016-11-29T02:17:52 | 74,692,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/gengruijie/Django1.10/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper for easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the '-script.pyw'/'.exe' suffix that Windows launchers append to
    # argv[0] so setuptools sees the canonical command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gengr@rpi.edu"
] | gengr@rpi.edu | |
3312b51d5e5f1f3726320f7259525ad1936b0f31 | b7320c9d3b36973812314cb6cde6c056f3311972 | /general_test.py | 530964ae76bd5bac390bc7c9f6451797558492a2 | [] | no_license | dhueholt/Misc-bits | 27b75cf85026d0253e53f99197c11bbfc44baba6 | dc7219ea79234e661c5d8f9b113a26edacd94ec5 | refs/heads/master | 2022-02-14T23:16:16.831681 | 2019-07-22T20:18:40 | 2019-07-22T20:18:40 | 198,296,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | """ Test for VIIRS EDR product
Author(s): Daniel Hueholt @dhueholt GitHub
"""
from glob import glob
import matplotlib.pyplot as plt
from satpy import Scene
import cartopy.crs as ccrs
import pdb
# Load the VIIRS EDR cloud-mask granules for 2019-03-06.
FILENAMES = glob('/Users/dhueholt/Documents/Data/CloudMask/20190306/JRR*.nc')
SCN = Scene(reader='viirs_edr_gran', filenames=FILENAMES)
SCN.load(['cloudmaskbinary'])
# Build an optimal Lambert conformal bounding-box area for the loaded data
# and resample the scene onto it.
MY_AREA = SCN['cloudmaskbinary'].attrs['area'].compute_optimal_bb_area({'proj': 'lcc', 'lon_0': -96.,
                                                                        'lat_0': 39., 'lat_1': 25.,
                                                                        'lat_2': 25.})
NEW_SCN = SCN.resample(MY_AREA)
# pdb.set_trace()
NEW_SCN.save_dataset('cloudmaskbinary','/Users/dhueholt/Images/cmb.png')
# Plot the resampled mask on its own cartopy CRS with coastlines/gridlines.
CRS = NEW_SCN['cloudmaskbinary'].attrs['area'].to_cartopy_crs()
# NOTE(review): lambert_proj is never used below — candidate for removal.
lambert_proj = ccrs.LambertConformal()
AX = plt.axes(projection=CRS)
AX.coastlines()
AX.gridlines()
AX.set_global()
plt.imshow(NEW_SCN['cloudmaskbinary'], transform=CRS, extent=CRS.bounds, origin='upper')
# CBAR = plt.colorbar()
# CBAR.set_label('cloudmaskbinary')
# plt.clim(-4,4)
plt.savefig('/Users/dhueholt/Images/reference_1.png')
| [
"dmhuehol@ncsu.edu"
] | dmhuehol@ncsu.edu |
fa423cdd35927ebb9664b82df50cab4322eebe1f | 9f75a1f7e1aa7c9e3bff6aeb261808d596b75fa5 | /agent.py | 8f2c88d0157d6805b784d73414cf0700d6839e53 | [] | no_license | butterkaffee/drlnd_project1 | 2609b97d4122683b25d2c22f452077d2ccde1f71 | 2843211006947f0598e5ba7c23f10e90e399d834 | refs/heads/master | 2020-05-31T06:42:36.942280 | 2019-07-08T17:25:56 | 2019-07-08T17:25:56 | 190,148,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,605 | py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork, DuelingDQN
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 256 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
import random
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_atoms = 51
Vmin = -10
Vmax = 10
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# Get max predicted Q values (for next states) from target model
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class PrioReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed, prob_alpha=0.6):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.prob_alpha = prob_alpha
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
self.priorities = np.zeros((buffer_size,), dtype=np.float32)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class DuelingAgent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = DuelingDQN(state_size, action_size, seed).to(device)
self.qnetwork_target = DuelingDQN(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local.act(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# Get max predicted Q values (for next states) from target model
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
if random.uniform(0,1) > 0.99:
print(loss)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
| [
"noreply@github.com"
] | butterkaffee.noreply@github.com |
ba8d9485f114b77345b5bdc786cacf2516b8dba0 | b29dcbf879166592b59e34f0e2bc4918c3ac94a0 | /cart/views.py | 4dfc522e62c9c9e4cc9b815d50b1184bbe3d6954 | [] | no_license | samdasoxide/myshop | ce6d4553af04f1ddf5de1cbfa38ef2ff33ac6b11 | 21115de7748862c8a44ef4dc5a61511ad67746dd | refs/heads/master | 2022-12-14T07:39:13.803686 | 2017-06-20T11:42:30 | 2017-06-20T11:42:30 | 92,954,076 | 0 | 0 | null | 2022-12-07T23:58:40 | 2017-05-31T14:23:18 | JavaScript | UTF-8 | Python | false | false | 1,067 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductFrom
@require_POST
def cart_add(request, product_id):
    """Add a product to the session cart, then redirect to the cart page.

    POST-only. Quantity and the replace-vs-increment flag come from the
    submitted CartAddProductFrom; nothing is added when the form is invalid.
    """
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductFrom(request.POST)
    if not form.is_valid():
        return redirect('cart:cart_detail')
    data = form.cleaned_data
    cart.add(
        product=product,
        quantity=data['quantity'],
        update_quantity=data['update'],
    )
    return redirect('cart:cart_detail')
def cart_remove(request, product_id):
    """Drop the given product from the session cart, then show the cart."""
    session_cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    session_cart.remove(product)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the cart page; each line item carries a quantity-update form
    pre-filled with its current quantity."""
    cart = Cart(request)
    for item in cart:
        initial = {'quantity': item['quantity'], 'update': True}
        item['update_quantity_form'] = CartAddProductFrom(initial=initial)
    return render(request, 'cart/detail.html', {'cart': cart})
| [
"samdasoxide@gmail.com"
] | samdasoxide@gmail.com |
11e303d4c69ca7bcedd509112ad6562b91d12bdc | 6a562077f79213f6b2bb89e92d6a16d931268089 | /frappe/core/doctype/data_import/importer_new.py | 6fccbc89ef1f32fc83abe5d05da1ba572513dd91 | [
"MIT"
] | permissive | libracore/frappe | 74fe917b75aa1cfad38c71519914180d5d5f1366 | 92d94a73a3445a252a2828de0053dcce86a18f17 | refs/heads/v12 | 2023-07-17T04:58:08.622228 | 2023-06-28T17:27:33 | 2023-06-28T17:27:33 | 89,392,790 | 6 | 8 | MIT | 2023-08-29T16:29:03 | 2017-04-25T18:19:40 | Python | UTF-8 | Python | false | false | 27,077 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import io
import os
import json
import timeit
import frappe
from datetime import datetime
from frappe import _
from frappe.utils import cint, flt, update_progress_bar
from frappe.utils.csvutils import read_csv_content
from frappe.utils.xlsxutils import (
read_xlsx_file_from_attached_file,
read_xls_file_from_attached_file,
)
from frappe.model import no_value_fields, table_fields
# Cell values treated as "empty" throughout the importer.
INVALID_VALUES = ["", None]
# Cap on the number of rows returned for the UI import preview.
MAX_ROWS_IN_PREVIEW = 10
# pylint: disable=R0201
class Importer:
def __init__(
    self, doctype, data_import=None, file_path=None, content=None, console=False
):
    """Load the import file, remember template options, and parse rows.

    :param doctype: target DocType for the import
    :param data_import: optional Data Import document carrying the file
        reference and template options
    :param file_path: optional path to read when no data_import is given
    :param content: optional raw file content
    :param console: True when driven from the CLI (affects progress output)
    """
    self.doctype = doctype
    self.template_options = frappe._dict({"remap_column": {}})
    self.console = console

    if data_import:
        self.data_import = data_import
        if self.data_import.template_options:
            template_options = frappe.parse_json(self.data_import.template_options)
            self.template_options.update(template_options)
    else:
        self.data_import = None

    self.header_row = None
    self.data = None
    # used to store date formats guessed from data rows per column
    self._guessed_date_formats = {}
    # used to store eta during import
    self.last_eta = 0
    # used to collect warnings during template parsing
    # and show them to user
    self.warnings = []

    self.meta = frappe.get_meta(doctype)

    self.prepare_content(file_path, content)
    self.parse_data_from_template()
def prepare_content(self, file_path, content):
    """Resolve the raw file content (from the Data Import document or from
    ``file_path``), read it into header/data rows, validate the shape, and
    strip fully-empty rows and columns."""
    extension = None
    if self.data_import and self.data_import.import_file:
        file_doc = frappe.get_doc("File", {"file_url": self.data_import.import_file})
        content = file_doc.get_content()
        extension = file_doc.file_name.split(".")[1]

    if file_path:
        content, extension = self.read_file(file_path)

    if not extension:
        # fall back to CSV when the extension cannot be determined
        extension = "csv"

    if content:
        self.read_content(content, extension)

    self.validate_template_content()
    self.remove_empty_rows_and_columns()
def read_file(self, file_path):
    """Read the template file from disk.

    :param file_path: path to the template file
    :return: (raw bytes, extension-without-dot) tuple
    """
    # os.path.splitext is robust against dots in directory names, unlike
    # the previous file_path.split(".")[1], which broke for paths such as
    # "/tmp/v1.2/data.csv".
    extn = os.path.splitext(file_path)[1].lstrip(".")

    with io.open(file_path, mode="rb") as f:
        file_content = f.read()

    return file_content, extn
def read_content(self, content, extension):
    """Parse raw ``content`` into ``self.header_row`` (first row) and
    ``self.data`` (remaining rows) according to the file ``extension``."""
    if extension == "csv":
        data = read_csv_content(content)
    elif extension == "xlsx":
        data = read_xlsx_file_from_attached_file(fcontent=content)
    elif extension == "xls":
        data = read_xls_file_from_attached_file(content)
    # NOTE(review): an unrecognized extension leaves ``data`` unbound and
    # raises UnboundLocalError below — confirm callers always normalize
    # the extension to csv/xlsx/xls first.
    self.header_row = data[0]
    self.data = data[1:]
def validate_template_content(self):
    """Throw "Invalid Template" if any non-empty row has a different column
    count than the header row (zero-length rows are tolerated)."""
    column_count = len(self.header_row)
    if any([len(row) != column_count and len(row) != 0 for row in self.data]):
        frappe.throw(
            _("Number of columns does not match with data"), title=_("Invalid Template")
        )
def remove_empty_rows_and_columns(self):
    """Drop fully-empty rows and fully-empty columns (no header, no data).

    Builds ``self.row_index_map`` so surviving rows can be traced back to
    their original 0-based position in the uploaded file.
    """
    self.row_index_map = []
    removed_rows = []
    removed_columns = []

    # remove empty rows
    data = []
    for i, row in enumerate(self.data):
        if all(v in INVALID_VALUES for v in row):
            # empty row
            removed_rows.append(i)
        else:
            data.append(row)
            self.row_index_map.append(i)

    # remove empty columns
    # a column with a header and no data is a valid column
    # a column with no header and no data will be removed
    header_row = []
    for i, column in enumerate(self.header_row):
        column_values = [row[i] for row in data]
        values = [column] + column_values
        if all(v in INVALID_VALUES for v in values):
            # empty column
            removed_columns.append(i)
        else:
            header_row.append(column)

    data_without_empty_columns = []
    # remove empty columns from data
    for i, row in enumerate(data):
        new_row = [v for j, v in enumerate(row) if j not in removed_columns]
        data_without_empty_columns.append(new_row)

    self.data = data_without_empty_columns
    self.header_row = header_row
def get_data_for_import_preview(self):
    """Return rows, columns and template warnings for the UI preview,
    truncating the data to MAX_ROWS_IN_PREVIEW rows."""
    out = frappe._dict()
    out.data = list(self.rows)
    out.columns = self.columns
    out.warnings = self.warnings

    if len(out.data) > MAX_ROWS_IN_PREVIEW:
        out.data = out.data[:MAX_ROWS_IN_PREVIEW]
        out.max_rows_exceeded = True
        out.max_rows_in_preview = MAX_ROWS_IN_PREVIEW

    return out
def parse_data_from_template(self):
    """Populate ``self.columns`` / ``self.rows`` from the parsed header row
    and data, prefixing the synthetic "Sr. No" column."""
    columns = self.parse_columns_from_header_row()
    columns, data = self.add_serial_no_column(columns, self.data)

    self.columns = columns
    self.rows = data
def parse_columns_from_header_row(self):
    """Map each header cell to a docfield: explicit user remaps win, then
    label/fieldname matching; info-level warnings are recorded for mapped,
    skipped and unmatched columns. Returns the column descriptor list."""
    remap_column = self.template_options.remap_column
    columns = []
    df_by_labels_and_fieldnames = self.build_fields_dict_for_column_matching()
    for i, header_title in enumerate(self.header_row):
        header_row_index = str(i)
        column_number = str(i + 1)
        skip_import = False
        fieldname = remap_column.get(header_row_index)

        if fieldname and fieldname != "Don't Import":
            # the user explicitly remapped this column to a field
            df = df_by_labels_and_fieldnames.get(fieldname)
            self.warnings.append(
                {
                    "col": column_number,
                    "message": _("Mapping column {0} to field {1}").format(
                        frappe.bold(header_title or "<i>Untitled Column</i>"), frappe.bold(df.label)
                    ),
                    "type": "info",
                }
            )
        else:
            # fall back to matching the header text itself
            df = df_by_labels_and_fieldnames.get(header_title)

        if not df:
            skip_import = True
        else:
            skip_import = False

        if fieldname == "Don't Import":
            skip_import = True
            self.warnings.append(
                {
                    "col": column_number,
                    "message": _("Skipping column {0}").format(frappe.bold(header_title)),
                    "type": "info",
                }
            )
        elif header_title and not df:
            self.warnings.append(
                {
                    "col": column_number,
                    "message": _("Cannot match column {0} with any field").format(
                        frappe.bold(header_title)
                    ),
                    "type": "info",
                }
            )
        elif not header_title and not df:
            self.warnings.append(
                {"col": column_number, "message": _("Skipping Untitled Column"), "type": "info"}
            )

        columns.append(
            frappe._dict(
                df=df,
                skip_import=skip_import,
                header_title=header_title,
                column_number=column_number,
                index=i,
            )
        )
    return columns
def build_fields_dict_for_column_matching(self):
    """
    Build a dict with various keys to match with column headers and value as docfield
    The keys can be label or fieldname
    {
        'Customer': df1,
        'customer': df1,
        'Due Date': df2,
        'due_date': df2,
        'Item Code (Sales Invoice Item)': df3,
        'Sales Invoice Item:item_code': df3,
    }
    """
    out = {}
    table_doctypes = [df.options for df in self.meta.get_table_fields()]
    doctypes = table_doctypes + [self.doctype]
    for doctype in doctypes:
        # name field — synthetic "ID" docfield for each doctype
        name_key = "ID" if self.doctype == doctype else "ID ({})".format(doctype)
        name_df = frappe._dict(
            {
                "fieldtype": "Data",
                "fieldname": "name",
                "label": "ID",
                "reqd": self.data_import.import_type == "Update Existing Records",
                "parent": doctype,
            }
        )
        out[name_key] = name_df
        out["name"] = name_df

        # other fields
        meta = frappe.get_meta(doctype)
        fields = self.get_standard_fields(doctype) + meta.fields
        for df in fields:
            fieldtype = df.fieldtype or "Data"
            parent = df.parent or self.doctype
            if fieldtype not in no_value_fields:
                # label as key
                label = (
                    df.label if self.doctype == doctype else "{0} ({1})".format(df.label, parent)
                )
                out[label] = df
                # fieldname as key
                if self.doctype == doctype:
                    out[df.fieldname] = df
                else:
                    key = "{0}:{1}".format(doctype, df.fieldname)
                    out[key] = df

    # if autoname is based on field
    # add an entry for "ID (Autoname Field)"
    autoname_field = self.get_autoname_field(self.doctype)
    if autoname_field:
        out["ID ({})".format(autoname_field.label)] = autoname_field
        # ID field should also map to the autoname field
        out["ID"] = autoname_field
        out["name"] = autoname_field

    return out
def get_standard_fields(self, doctype):
    """Return importable standard fields for ``doctype`` as docfield dicts:
    parent/idx bookkeeping fields for child tables, owner/docstatus for
    top-level doctypes."""
    meta = frappe.get_meta(doctype)
    if meta.istable:
        standard_fields = [
            {"label": "Parent", "fieldname": "parent"},
            {"label": "Parent Type", "fieldname": "parenttype"},
            {"label": "Parent Field", "fieldname": "parentfield"},
            {"label": "Row Index", "fieldname": "idx"},
        ]
    else:
        standard_fields = [
            {"label": "Owner", "fieldname": "owner"},
            {"label": "Document Status", "fieldname": "docstatus", "fieldtype": "Int"},
        ]
    out = []
    for df in standard_fields:
        df = frappe._dict(df)
        df.parent = doctype
        out.append(df)
    return out
def add_serial_no_column(self, columns, data):
    """Prepend a non-importable "Sr. No" column, re-number every column's
    ``index``, and prefix each data row with its original 1-based row
    number (via ``self.row_index_map``)."""
    columns_with_serial_no = [
        frappe._dict({"header_title": "Sr. No", "skip_import": True})
    ] + columns

    # update index for each column
    for i, col in enumerate(columns_with_serial_no):
        col.index = i

    data_with_serial_no = []
    for i, row in enumerate(data):
        data_with_serial_no.append([self.row_index_map[i] + 1] + row)

    return columns_with_serial_no, data_with_serial_no
def parse_value(self, value, df):
    """Coerce a raw cell ``value`` into the Python type implied by the
    docfield ``df`` (Check/Int → int, Float/Percent/Currency → float,
    Date/Datetime → datetime via the guessed column format)."""
    # convert boolean-ish strings to 0 or 1; guard with isinstance because
    # spreadsheet readers can hand over non-string cells (int/bool), on
    # which .lower() would raise AttributeError
    if df.fieldtype == "Check" and isinstance(value, str) and value.lower().strip() in ["t", "f", "true", "false"]:
        value = value.lower().strip()
        value = 1 if value in ["t", "true"] else 0

    if df.fieldtype in ["Int", "Check"]:
        value = cint(value)
    elif df.fieldtype in ["Float", "Percent", "Currency"]:
        value = flt(value)
    elif df.fieldtype in ["Date", "Datetime"]:
        value = self.parse_date_format(value, df)

    return value
def parse_date_format(self, value, df):
    """Parse ``value`` into a datetime using the column's guessed strptime
    format; return it unchanged when no format could be guessed."""
    date_format = self.guess_date_format_for_column(df.fieldname)
    if date_format:
        return datetime.strptime(value, date_format)
    return value
def guess_date_format_for_column(self, fieldname):
    """ Guesses date format for a column by parsing the first 10 values in the column,
    getting the date format and then returning the one which has the maximum frequency
    """
    PARSE_ROW_COUNT = 10

    if not self._guessed_date_formats.get(fieldname):
        column_index = -1

        for i, field in enumerate(self.header_row):
            if self.meta.has_field(field) and field == fieldname:
                column_index = i
                break

        if column_index == -1:
            # BUGFIX: remember the miss and bail out. Previously execution
            # fell through with column_index == -1 and sampled row[-1]
            # (the last column) as if it were the requested one.
            self._guessed_date_formats[fieldname] = None
            return None

        date_values = [
            row[column_index] for row in self.data[:PARSE_ROW_COUNT] if row[column_index]
        ]
        date_formats = [guess_date_format(d) for d in date_values]
        if not date_formats:
            return

        # pick the format that occurs most often among the sampled rows
        max_occurred_date_format = max(set(date_formats), key=date_formats.count)
        self._guessed_date_formats[fieldname] = max_occurred_date_format

    return self._guessed_date_formats[fieldname]
def import_data(self):
    """Run the import: parse payloads, skip rows already imported, insert
    or update one doc per payload (committing per success, rolling back
    per failure), and persist status + import log on the Data Import."""
    # set user lang for translations
    frappe.cache().hdel("lang", frappe.session.user)
    frappe.set_user_lang(frappe.session.user)

    if not self.console:
        self.data_import.db_set("template_warnings", "")

    # set flags
    frappe.flags.in_import = True
    frappe.flags.mute_emails = self.data_import.mute_emails

    # prepare a map for missing link field values
    self.prepare_missing_link_field_values()
    # parse docs from rows
    payloads = self.get_payloads_for_import()

    # dont import if there are non-ignorable warnings
    warnings = [w for w in self.warnings if w.get("type") != "info"]
    if warnings:
        if self.console:
            self.print_grouped_warnings(warnings)
        else:
            self.data_import.db_set("template_warnings", json.dumps(warnings))
            frappe.publish_realtime(
                "data_import_refresh", {"data_import": self.data_import.name}
            )
        return

    # setup import log
    if self.data_import.import_log:
        import_log = frappe.parse_json(self.data_import.import_log)
    else:
        import_log = []

    # remove previous failures from import log
    import_log = [l for l in import_log if l.get("success") == True]

    # get successfully imported rows
    imported_rows = []
    for log in import_log:
        log = frappe._dict(log)
        if log.success:
            imported_rows += log.row_indexes

    # start import
    total_payload_count = len(payloads)
    batch_size = frappe.conf.data_import_batch_size or 1000

    for batch_index, batched_payloads in enumerate(
        frappe.utils.create_batch(payloads, batch_size)
    ):
        for i, payload in enumerate(batched_payloads):
            doc = payload.doc
            row_indexes = [row[0] for row in payload.rows]
            current_index = (i + 1) + (batch_index * batch_size)

            # rows imported in a previous (partial) run are skipped
            if set(row_indexes).intersection(set(imported_rows)):
                print("Skipping imported rows", row_indexes)
                if total_payload_count > 5:
                    frappe.publish_realtime(
                        "data_import_progress",
                        {
                            "current": current_index,
                            "total": total_payload_count,
                            "skipping": True,
                            "data_import": self.data_import.name,
                        },
                    )
                continue

            try:
                start = timeit.default_timer()
                doc = self.process_doc(doc)
                processing_time = timeit.default_timer() - start
                eta = self.get_eta(current_index, total_payload_count, processing_time)

                if total_payload_count > 5:
                    frappe.publish_realtime(
                        "data_import_progress",
                        {
                            "current": current_index,
                            "total": total_payload_count,
                            "docname": doc.name,
                            "data_import": self.data_import.name,
                            "success": True,
                            "row_indexes": row_indexes,
                            "eta": eta,
                        },
                    )
                if self.console:
                    update_progress_bar(
                        "Importing {0} records".format(total_payload_count),
                        current_index,
                        total_payload_count,
                    )
                import_log.append(
                    frappe._dict(success=True, docname=doc.name, row_indexes=row_indexes)
                )
                # commit after every successful import
                frappe.db.commit()
            except Exception:
                import_log.append(
                    frappe._dict(
                        success=False,
                        exception=frappe.get_traceback(),
                        messages=frappe.local.message_log,
                        row_indexes=row_indexes,
                    )
                )
                frappe.clear_messages()
                # rollback if exception
                frappe.db.rollback()

    # set status
    failures = [l for l in import_log if l.get("success") == False]
    if len(failures) == total_payload_count:
        status = "Pending"
    elif len(failures) > 0:
        status = "Partial Success"
    else:
        status = "Success"

    if self.console:
        self.print_import_log(import_log)
    else:
        self.data_import.db_set("status", status)
        self.data_import.db_set("import_log", json.dumps(import_log))

    frappe.flags.in_import = False
    frappe.flags.mute_emails = False
    frappe.publish_realtime("data_import_refresh", {"data_import": self.data_import.name})

    return import_log
def get_payloads_for_import(self):
    """Consume a copy of self.rows and return one payload per parsed doc.

    Each payload is a frappe._dict holding the parsed ``doc`` and the raw
    ``rows`` that produced it.
    """
    remaining = list(self.rows)  # work on a copy; parsing consumes rows
    payloads = []
    while remaining:
        doc, consumed, remaining = self.parse_next_row_for_import(remaining)
        payloads.append(frappe._dict(doc=doc, rows=consumed))
    return payloads
def parse_next_row_for_import(self, data):
    """
    Parses rows that make up a doc. A doc maybe built from a single row or multiple rows.
    Returns the doc, rows, and data without the rows.
    """
    doctypes = set([col.df.parent for col in self.columns if col.df and col.df.parent])

    # first row is included by default
    first_row = data[0]
    rows = [first_row]

    # if there are child doctypes, find the subsequent rows
    if len(doctypes) > 1:
        # subsequent rows either dont have any parent value set
        # or have the same value as the parent row
        # we include a row if either of conditions match
        parent_column_indexes = [
            col.index
            for col in self.columns
            if not col.skip_import and col.df and col.df.parent == self.doctype
        ]
        parent_row_values = [first_row[i] for i in parent_column_indexes]

        data_without_first_row = data[1:]
        for row in data_without_first_row:
            row_values = [row[i] for i in parent_column_indexes]
            # if the row is blank, it's a child row doc
            if all([v in INVALID_VALUES for v in row_values]):
                rows.append(row)
                continue
            # if the row has same values as parent row, it's a child row doc
            if row_values == parent_row_values:
                rows.append(row)
                continue
            # if any of those conditions dont match, it's the next doc
            break

    def get_column_indexes(doctype):
        # indexes of importable columns belonging to ``doctype``
        return [
            col.index
            for col in self.columns
            if not col.skip_import and col.df and col.df.parent == doctype
        ]

    def validate_value(value, df):
        # NOTE: ``row_number`` is resolved from the enclosing function's
        # scope at call time (set in the parsing loop below).
        if df.fieldtype == "Select":
            select_options = df.get_select_options()
            if select_options and value not in select_options:
                options_string = ", ".join([frappe.bold(d) for d in select_options])
                msg = _("Value must be one of {0}").format(options_string)
                self.warnings.append(
                    {
                        "row": row_number,
                        "field": df.as_dict(convert_dates_to_str=True),
                        "message": msg,
                    }
                )
                return False
        elif df.fieldtype == "Link":
            d = self.get_missing_link_field_values(df.options)
            if value in d.missing_values and not d.one_mandatory:
                msg = _("Value {0} missing for {1}").format(
                    frappe.bold(value), frappe.bold(df.options)
                )
                self.warnings.append(
                    {
                        "row": row_number,
                        "field": df.as_dict(convert_dates_to_str=True),
                        "message": msg,
                    }
                )
                return value
        return value

    def parse_doc(doctype, docfields, values, row_number):
        # new_doc returns a dict with default values set
        doc = frappe.new_doc(doctype, as_dict=True)
        # remove standard fields and __islocal
        for key in frappe.model.default_fields + ("__islocal",):
            doc.pop(key, None)

        for df, value in zip(docfields, values):
            if value in INVALID_VALUES:
                value = None
            value = validate_value(value, df)
            if value:
                doc[df.fieldname] = self.parse_value(value, df)

        check_mandatory_fields(doctype, doc, row_number)
        return doc

    def check_mandatory_fields(doctype, doc, row_number):
        # check if mandatory fields are set (except table fields)
        meta = frappe.get_meta(doctype)
        fields = [
            df
            for df in meta.fields
            if df.fieldtype not in table_fields
            and df.reqd
            and doc.get(df.fieldname) in INVALID_VALUES
        ]
        if not fields:
            return
        if len(fields) == 1:
            self.warnings.append(
                {
                    "row": row_number,
                    "message": _("{0} is a mandatory field").format(fields[0].label),
                }
            )
        else:
            fields_string = ", ".join([df.label for df in fields])
            self.warnings.append(
                {"row": row_number, "message": _("{0} are mandatory fields").format(fields_string)}
            )

    # parse one doc (per doctype) out of the collected rows
    parsed_docs = {}
    for row in rows:
        for doctype in doctypes:
            if doctype == self.doctype and parsed_docs.get(doctype):
                # if parent doc is already parsed from the first row
                # then skip
                continue
            row_number = row[0]
            column_indexes = get_column_indexes(doctype)
            values = [row[i] for i in column_indexes]
            if all(v in INVALID_VALUES for v in values):
                # skip values if all of them are empty
                continue
            columns = [self.columns[i] for i in column_indexes]
            docfields = [col.df for col in columns]
            doc = parse_doc(doctype, docfields, values, row_number)
            parsed_docs[doctype] = parsed_docs.get(doctype, [])
            parsed_docs[doctype].append(doc)

    # build the doc with children
    doc = {}
    for doctype, docs in parsed_docs.items():
        if doctype == self.doctype:
            doc.update(docs[0])
        else:
            table_dfs = self.meta.get(
                "fields", {"options": doctype, "fieldtype": ["in", table_fields]}
            )
            if table_dfs:
                table_field = table_dfs[0]
                doc[table_field.fieldname] = docs

    # check if there is atleast one row for mandatory table fields
    mandatory_table_fields = [
        df
        for df in self.meta.fields
        if df.fieldtype in table_fields and df.reqd and len(doc.get(df.fieldname, [])) == 0
    ]
    if len(mandatory_table_fields) == 1:
        self.warnings.append(
            {
                "row": first_row[0],
                "message": _("There should be atleast one row for {0} table").format(
                    mandatory_table_fields[0].label
                ),
            }
        )
    elif mandatory_table_fields:
        fields_string = ", ".join([df.label for df in mandatory_table_fields])
        self.warnings.append(
            {
                "row": first_row[0],
                "message": _("There should be atleast one row for the following tables: {0}").format(fields_string),
            }
        )

    return doc, rows, data[len(rows):]
def process_doc(self, doc):
    """Route ``doc`` to insert or update based on the Data Import's
    configured import type; returns the processed document (or None for
    an unrecognized import type)."""
    handlers = {
        "Insert New Records": self.insert_record,
        "Update Existing Records": self.update_record,
    }
    handler = handlers.get(self.data_import.import_type)
    if handler is not None:
        return handler(doc)
def insert_record(self, doc):
    """Insert ``doc`` as a new document, auto-creating simple missing link
    targets first; submit it when the import is configured to. Returns the
    inserted document."""
    self.create_missing_linked_records(doc)
    new_doc = frappe.new_doc(self.doctype)
    new_doc.update(doc)
    # name shouldn't be set when inserting a new record
    new_doc.set("name", None)
    new_doc.insert()
    if self.meta.is_submittable and self.data_import.submit_after_import:
        new_doc.submit()
    return new_doc
def create_missing_linked_records(self, doc):
    """
    Finds fields that are of type Link, and creates the corresponding
    document automatically if it has only one mandatory field
    """
    link_values = []

    def get_link_fields(doc, doctype):
        # collect (doctype, value) pairs for every Link field, recursing
        # into child-table rows
        for fieldname, value in doc.items():
            meta = frappe.get_meta(doctype)
            df = meta.get_field(fieldname)
            if not df:
                continue
            if df.fieldtype == "Link" and value not in INVALID_VALUES:
                link_values.append([df.options, value])
            elif df.fieldtype in table_fields:
                for row in value:
                    get_link_fields(row, df.options)

    get_link_fields(doc, self.doctype)

    for link_doctype, link_value in link_values:
        d = self.missing_link_values.get(link_doctype)
        # only auto-create when the target doctype needs exactly one value
        if d and d.one_mandatory and link_value in d.missing_values:
            # find the autoname field
            autoname_field = self.get_autoname_field(link_doctype)
            name_field = autoname_field.fieldname if autoname_field else "name"
            new_doc = frappe.new_doc(link_doctype)
            new_doc.set(name_field, link_value)
            new_doc.insert()
            d.missing_values.remove(link_value)
def update_record(self, doc):
    """Load the existing document identified by the ID column, overwrite it
    with the parsed values, and save. Returns the saved document."""
    id_fieldname = self.get_id_fieldname()
    id_value = doc[id_fieldname]
    existing_doc = frappe.get_doc(self.doctype, id_value)
    # tag the doc so hooks can tell this save came from a data import
    existing_doc.flags.via_data_import = self.data_import.name
    existing_doc.update(doc)
    existing_doc.save()
    return existing_doc
def export_errored_rows(self):
    """Stream a CSV response containing only the rows whose import failed,
    so the user can fix and re-upload them."""
    from frappe.utils.csvutils import build_csv_response
    if not self.data_import:
        return

    import_log = frappe.parse_json(self.data_import.import_log or "[]")
    failures = [l for l in import_log if l.get("success") == False]
    row_indexes = []
    for f in failures:
        row_indexes.extend(f.get("row_indexes", []))

    # de duplicate
    row_indexes = list(set(row_indexes))
    row_indexes.sort()

    # drop the synthetic "Sr. No" column (index 0) from header and rows
    header_row = [col.header_title for col in self.columns[1:]]
    rows = [header_row]
    rows += [row[1:] for row in self.rows if row[0] in row_indexes]

    build_csv_response(rows, self.doctype)
def get_missing_link_field_values(self, doctype):
    """Return the bookkeeping entry built by
    prepare_missing_link_field_values for ``doctype``, or {} when none."""
    lookup = self.missing_link_values
    return lookup.get(doctype, {})
def prepare_missing_link_field_values(self):
    """For every Link column, record which referenced documents do not yet
    exist, and whether the target doctype can be auto-created (i.e. it has
    exactly one mandatory field)."""
    columns = self.columns
    rows = self.rows

    link_column_indexes = [
        col.index for col in columns if col.df and col.df.fieldtype == "Link"
    ]
    self.missing_link_values = {}
    for index in link_column_indexes:
        col = columns[index]
        column_values = [row[index] for row in rows]
        values = set([v for v in column_values if v not in INVALID_VALUES])
        doctype = col.df.options
        missing_values = [value for value in values if not frappe.db.exists(doctype, value)]
        if self.missing_link_values.get(doctype):
            # two Link columns may target the same doctype; merge them
            self.missing_link_values[doctype].missing_values += missing_values
        else:
            self.missing_link_values[doctype] = frappe._dict(
                missing_values=missing_values,
                one_mandatory=self.has_one_mandatory_field(doctype),
                df=col.df,
            )
def get_id_fieldname(self):
    """Return the fieldname used to identify records in the ID column: the
    autoname field's name when the doctype autonames from a field,
    otherwise the standard "name"."""
    autoname_field = self.get_autoname_field(self.doctype)
    return autoname_field.fieldname if autoname_field else "name"
def get_eta(self, current, total, processing_time):
    """Return a monotonically non-increasing ETA estimate (seconds).

    The candidate is the latest per-payload processing time extrapolated
    over the remaining payloads; it replaces the stored estimate only when
    lower (or when nothing is stored yet), so the displayed ETA never
    jumps upward.
    """
    candidate = processing_time * (total - current)
    if not self.last_eta or candidate < self.last_eta:
        self.last_eta = candidate
    return self.last_eta
def has_one_mandatory_field(self, doctype):
    """Return True when ``doctype`` has exactly one required field without
    a default value (a "Prompt" autoname counts as one required field), in
    which case its records can be auto-created from a single value."""
    meta = frappe.get_meta(doctype)
    # get mandatory fields with default not set
    mandatory_fields = [df for df in meta.fields if df.reqd and not df.default]
    mandatory_fields_count = len(mandatory_fields)
    if meta.autoname and meta.autoname.lower() == "prompt":
        mandatory_fields_count += 1
    return mandatory_fields_count == 1
def get_autoname_field(self, doctype):
    """Return the docfield ``doctype`` autonames from when its autoname is
    of the form "field:<fieldname>"; otherwise None."""
    meta = frappe.get_meta(doctype)
    autoname = meta.autoname
    if not autoname or not autoname.startswith("field:"):
        return None
    return meta.get_field(autoname[len("field:"):])
def print_grouped_warnings(self, warnings):
    """Print warnings to the console, grouped under a "Row N" heading when
    they carry a row number; row-less warnings are printed last."""
    by_row = {}
    rowless = []
    for warning in warnings:
        row = warning.get("row")
        if row:
            by_row.setdefault(row, []).append(warning)
        else:
            rowless.append(warning)

    for row, grouped in by_row.items():
        print("Row {0}".format(row))
        for warning in grouped:
            print(warning.get("message"))

    for warning in rowless:
        print(warning.get("message"))
def print_import_log(self, import_log):
    """Print a console summary of the import and, when there were
    failures, dump their row indexes, messages and tracebacks to a
    per-run text file."""
    failed_records = [l for l in import_log if not l.success]
    successful_records = [l for l in import_log if l.success]

    if successful_records:
        print(
            "Successfully imported {0} records out of {1}".format(
                len(successful_records), len(import_log)
            )
        )

    if failed_records:
        print("Failed to import {0} records".format(len(failed_records)))
        file_name = '{0}_import_on_{1}.txt'.format(self.doctype, frappe.utils.now())
        print('Check {0} for errors'.format(os.path.join('sites', file_name)))
        text = ""
        for w in failed_records:
            text += "Row Indexes: {0}\n".format(str(w.get('row_indexes', [])))
            text += "Messages:\n{0}\n".format('\n'.join(w.get('messages', [])))
            text += "Traceback:\n{0}\n\n".format(w.get('exception'))
        with open(file_name, 'w') as f:
            f.write(text)
# Candidate date formats, tried in order; the first successful strptime wins.
DATE_FORMATS = [
    r"%d-%m-%Y",
    r"%m-%d-%Y",
    r"%Y-%m-%d",
    r"%d-%m-%y",
    r"%m-%d-%y",
    r"%y-%m-%d",
    r"%d/%m/%Y",
    r"%m/%d/%Y",
    r"%Y/%m/%d",
    r"%d/%m/%y",
    r"%m/%d/%y",
    r"%y/%m/%d",
    r"%d.%m.%Y",
    r"%m.%d.%Y",
    r"%Y.%m.%d",
    r"%d.%m.%y",
    r"%m.%d.%y",
    r"%y.%m.%d",
]

# Candidate time formats for the part after the first space, most specific
# first so "%H:%M" does not shadow "%H:%M:%S".
TIME_FORMATS = [
    r"%H:%M:%S.%f",
    r"%H:%M:%S",
    r"%H:%M",
    r"%I:%M:%S.%f %p",
    r"%I:%M:%S %p",
    r"%I:%M %p",
]


def guess_date_format(date_string):
    """Guess the strptime format of ``date_string``.

    The string is split into a date part and an optional time part (after
    the first space); each part is matched against the known formats.
    Returns the combined format, a date-only or time-only format, or None
    when nothing matches.
    """
    date_string = date_string.strip()

    _date = None
    _time = None

    if " " in date_string:
        _date, _time = date_string.split(" ", 1)
    else:
        _date = date_string

    date_format = None
    time_format = None

    for f in DATE_FORMATS:
        try:
            # if date is parsed without any exception
            # capture the date format
            datetime.strptime(_date, f)
            date_format = f
            break
        except ValueError:
            pass

    if _time:
        for f in TIME_FORMATS:
            try:
                # if time is parsed without any exception
                # capture the time format
                datetime.strptime(_time, f)
                time_format = f
                break
            except ValueError:
                pass

    # BUGFIX: the previous ``full_format += " " + time_format`` raised
    # TypeError when only the time part matched (date_format was None).
    if date_format and time_format:
        return date_format + " " + time_format
    return date_format or time_format
def import_data(doctype, file_path):
    """Convenience entry point: build an Importer for ``doctype`` from a
    file on disk and run the import.

    :param doctype: target DocType
    :param file_path: path to the CSV/XLSX/XLS template file
    """
    # BUGFIX: ``file_path`` must be passed by keyword — the second
    # positional parameter of Importer is ``data_import``, so the previous
    # Importer(doctype, file_path) call treated the path as a Data Import
    # document.
    i = Importer(doctype, file_path=file_path)
    i.import_data()
| [
"netchamp.faris@gmail.com"
] | netchamp.faris@gmail.com |
451da3310b48ed6fd08983ee33fda5f2b27b92fd | 24171ea136e2ec211792d1d7644cd5c945a6df35 | /test/41.py | a05e622c42a995a2e87dd72731ed1c176dae0dc4 | [] | no_license | reidevries/codecoach | 1329ab367dc8aa3f3dd76af0b7cbc975a7d67ccd | a6d8e3cf28a6d264b0aa6aa8a44cc315803954b2 | refs/heads/master | 2021-05-23T10:02:53.403344 | 2020-04-05T12:57:24 | 2020-04-05T12:57:24 | 253,233,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | #! /usr/bin/env python2.6
# Python 2 script: validate "IP:port" lines read from a file given as the
# sole command-line argument, printing a verdict per line.
import re
import sys
import os

# Expect exactly one argument: the input filename.
argv = len(sys.argv)
if argv != 2:
    print "usage: ipfun.py <filename>"
    sys.exit(1)
argo = sys.argv[1]
# Bail out when the file is not readable.
if (os.access(argo, os.R_OK) == 0):
    print argo, "is not readable"
    sys.exit(2)
InFile = open(argo, "r")
# Dotted-quad regex: each octet is 0-199, 200-249 or 250-255, followed by ':'.
ipcheck = r"((([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\:)"
for line in InFile:
    validip = re.match(ipcheck, line)
    # strip the trailing newline
    line = line.split('\n')
    line = line[0]
    if validip:
        line2 = line.split(':')
        try :
            port = int(line2[1])
        except:
            print line,"- Invalid Port Number"
        else:
            # NOTE(review): uses bitwise & instead of `and`, and caps ports
            # at 32766 rather than 65535 — confirm intended range.
            if ((port > 0) & (port < 32767)):
                validport = 1
                if port < 1024:
                    # privileged port range
                    root = 1
                    print line,"- Valid (root privileges required)"
                else:
                    root = 0
                    print line,"- Valid"
            else:
                print line,"- Invalid Port Number"
    else:
        print line,"- Invalid IP Address"
sys.exit(0) | [
"raeaw@localhost.localdomain"
] | raeaw@localhost.localdomain |
fc56269afc1a9b27972e6ba65f1634e38ca3c907 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/volatil.py | da3fffbd742a2e39d77bda58f2168f2a493c7200 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 586 | py | ii = [('EmerRN.py', 1), ('RogePAV2.py', 2), ('GodwWSL2.py', 1), ('FerrSDO3.py', 1), ('WilbRLW.py', 1), ('ProuWCM.py', 5), ('PettTHE.py', 3), ('PeckJNG.py', 1), ('WilbRLW2.py', 7), ('CarlTFR.py', 2), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('BuckWGM.py', 1), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('SoutRD2.py', 1), ('MedwTAI2.py', 1), ('BuckWGM2.py', 1), ('WestJIT.py', 2), ('FitzRNS4.py', 2), ('EdgeMHT.py', 1), ('LyttELD3.py', 1), ('BellCHM.py', 1), ('WilbRLW3.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 4), ('FitzRNS2.py', 1), ('LyelCPG3.py', 1), ('BeckWRE.py', 1), ('WordWYR.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
214f9f36330053db1146926c0969362d5663836f | 0eb42eb02dcd217b7c41993a99b3b6628a13a04e | /exponential.py | e2d1af3bcfbc5307cacb4cde47cbd5ed33dd56b4 | [] | no_license | luckysona/shanthiya | d1f29449c9511e33ce382666b53dd35b70534081 | c6d9acfe8e069be5e3c4428e27d4722afd9faa27 | refs/heads/master | 2020-05-25T22:59:50.235797 | 2019-07-23T17:40:53 | 2019-07-23T17:40:53 | 188,025,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | x,y=input().split()
x = int(x)
y = int(y)
# BUGFIX: no special case is needed for y == 0 — x ** 0 == 1 for any x,
# so the previous branch that printed x for y == 0 was mathematically wrong
# for the exponential x**y.
print(x ** y)
| [
"noreply@github.com"
] | luckysona.noreply@github.com |
014373df5e1938c8b39fd5fcaacbbd0655dfd64d | 4a53ae5afb11850196ac08763afc637a81ce1dbd | /turbo-entabulator/turbo_entabulator/utilities.py | 2e1a8a52db3511f74cc1a59121cdb99fb9f55ff6 | [] | no_license | xinyli-cumulus/TE_update | 5983309ef66f5316f56298fc3d7b1ef4fb0719d7 | 4e5b9299d3159bbcdd715c33cc08820ad2c8e3fb | refs/heads/master | 2021-05-26T10:49:14.229467 | 2020-04-08T14:10:19 | 2020-04-08T14:10:19 | 254,102,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,936 | py | #!/usr/bin/env python3
"""
Turbo-Entabulator utilities.
This file contains utilities used by the Turbo-Entabulator suite that don't
fall under the 'detections' or 'discovery' categories.
"""
# Copyright(c) 2018, 2019, 2020 Cumulus Networks, Inc
# John Fraizer <jfraizer@cumulusnetworks.com>
import json
import os
import random
import re
import sys
from turbo_entabulator.m_logger import logger
def check_dependencies(funcname, required, satisfied):  # noqa
    """
    Return True when every entry of 'required' appears in 'satisfied'.

    :param funcname: caller's name, used only in log messages
    :param required: list of dependency names that must be present
    :param satisfied: list of dependency names already satisfied
    :return bool
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    logger.debug("Checking dependencies: {} for function [{}]."
                 .format(required, funcname))
    missing = set(required) - set(satisfied)
    if missing:
        logger.debug("Required dependencies {} for [{}] have not been "
                     "satisfied!".format(list(missing), funcname))
        return False
    logger.debug("Dependencies satisfied.")
    return True
def expand_frr_ec(deprecated, satisfied, includes, problems,  # noqa
                  regex_matches):
    """
    Try to provide suggestions for ECs from FRR.

    Looks up each "Uncategorized FRR Error" match in includes/frr/ec.json
    and builds a human-readable suggestion list; unknown codes add a
    FILE-A-BUG entry to ``problems``.

    :param deprecated:
    :param satisfied:
    :param includes:
    :param problems:
    :param regex_matches:
    :return: (satisfied, problems, suggestions)
    """
    # Get function name (accesses private sys function, no better way)
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return satisfied, problems, {}
    reqs = ['detect_log_sigs']
    if not check_dependencies(name, reqs, satisfied):
        return satisfied, problems, {}
    if 'Uncategorized FRR Error' not in regex_matches:
        # NOTE(review): .format(name) has no {} placeholder here — the
        # argument is silently ignored.
        logger.debug("No matches to look up! Skipping".format(name))
        return satisfied, problems, {}
    # variable initialization not needed
    # db = {}
    filename = includes + "/frr/ec.json"
    if not os.path.isfile(filename):
        logger.debug("Could not open {} .".format(filename))
        problems.append('* * * TE CONFIG ERROR * * * Could not find {}! '
                        'Please verify that Turbo-Entabulator '
                        'is installed properly.'.format(filename))
        return satisfied, problems, {}
    logger.debug('Reading in {}...'.format(filename))
    with open(filename) as fh:
        db = json.load(fh)
        fh.close()
    # Dict to hold suggestions.
    suggestions = []
    count = 0
    for match in regex_matches['Uncategorized FRR Error']:
        _, ec = match.split(' ')
        count = count + 1
        if count > 1:
            # visual separator between consecutive error entries
            suggestions.append('-' * 76)
        # Does FRR contain the expanded error description?
        if ec in db:
            suggestions.append(match + ':\t' + db[ec]['title'])
            suggestions.append('Description:\t' + db[ec]['description'])
            suggestions.append('Suggestion:\t' + db[ec]['suggestion'])
        else:
            suggestions.append(match + ':\t' + 'Unknown Error Code')
            suggestions.append('Description:\t' + 'Not found in FRR error DB')
            suggestions.append('Suggestion:\t' + 'Please File bug with FRR '
                               'team to add detail for ' +
                               match)
            msg = ('FILE-A-BUG: [' + match + '] not found in FRR Error '
                   'Codes. Please file a bug with '
                   'FRR team to have error detail '
                   'added.')
            problems.append(msg)
    satisfied.append(name)
    # Then, return:
    return satisfied, problems, suggestions
def find_frr_path(deprecated, satisfied, support_path):  # noqa
    """Locate the FRR/Quagga running-config file inside the support dir.

    Returns a (satisfied, path) tuple; path is None when no candidate
    file exists or the function is deprecated / missing dependencies.
    """
    # Function name is derived at runtime so dependency tracking stays
    # correct even if the function is renamed.
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    if not check_dependencies(name, ['find_support_path'], satisfied):
        return (satisfied, None)
    # Candidate config filenames, newest naming convention first.
    candidates = ('frr.show_running', 'quagga.show_running',
                  'Quagga.show_running', 'zebra.config')
    for candidate in candidates:
        path = support_path + candidate
        if not os.path.isfile(path):
            continue
        logger.debug("Found {} .".format(path))
        satisfied.append(name)
        return (satisfied, path)
    logger.debug("Unable to find ?.show_running file to parse FRR data!")
    return (satisfied, None)
def find_ifquery_path(deprecated, satisfied, support_path):  # noqa
    """Locate the ifquery output file inside the support directory.

    Returns a (satisfied, path) tuple; path is None when no ifquery
    file is present or prerequisites are not met.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    if not check_dependencies(name, ['find_support_path'], satisfied):
        return (satisfied, None)
    for candidate in ('ifquery', 'ifquery-a'):
        path = support_path + candidate
        if not os.path.isfile(path):
            continue
        logger.debug("Found {} .".format(path))
        satisfied.append(name)
        return (satisfied, path)
    logger.debug("Unable to find ifquery file to parse data!")
    return (satisfied, None)
def find_support_path(deprecated, satisfied, CL):  # noqa
    """Verify that "Support/" or "support/" exists in the cl_support dir.

    Top of the dependency chain: needs no prior modules. Returns a
    (satisfied, path) tuple with the full support directory path, or
    (satisfied, None) when it cannot be located.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    if not check_dependencies(name, [], satisfied):
        return (satisfied, None)
    # The cl_support argument handed to the script must itself be a dir.
    if not os.path.isdir(CL):
        logger.debug("{} is not a directory!".format(CL))
        return (satisfied, None)
    satisfied.append('CL')
    # Check both capitalizations; return the first that is a directory.
    for subdir in ('Support/', 'support/'):
        support_path = CL + "/" + subdir
        if os.path.isdir(support_path):
            logger.debug("Found {} .".format(support_path))
            satisfied.append(name)
            return (satisfied, support_path)
        logger.debug("{} is not a directory!".format(support_path))
    return (satisfied, None)
def generate_report(result, print_logs, print_suggestions): # noqa
"""
Generate human readable report.
"""
if not result:
logger.error("Results are empty! Shit's broke!")
exit(1)
# Common section dividers
section_start_divider = '='*76 + '\n'
section_end_divider = '='*76 + '\n\n'
# Generate the report
interested = ['Script Version', 'hostname', 'eth0_ip', 'uptime',
'cl_support', 'Command line', 'Reason', 'license',
'lsb-release', 'image-release', 'upgraded with apt-get',
'sysinfo', 'platform.detect', 'switch-architecture',
'vendor', 'model', 'cpld_version', 'onie_version', 'bios',
'service_tag', 'chipset', 'ports', 'capabilities', 'caveats',
'datasheet'
]
msg = "[Overview]".center(76, '=') + '\n'
for item in interested:
if 'discovered' in result and item in result['discovered']:
if 'sysinfo' in item:
for item2 in result['discovered'][item]:
msg = msg + ('{:>21}: {}\n'
.format(item2.upper(),
result['discovered'][item][item2]))
elif 'bios' in item:
msg = msg + ('{:>21}: ['.format('BIOS'))
for item2 in result['discovered'][item]:
msg = msg + (' {}: {} '
.format(item2,
result['discovered'][item][item2]))
msg = msg + ' ]\n'
else:
msg = msg + ('{:>21}: {}\n'.format(item.upper(),
result['discovered'][item]))
msg = msg + section_end_divider
# print problems
if 'problems' in result.keys():
msg = msg + "[Problems]".center(76, '=') + '\n'
for item in result['problems']:
msg = msg + item + '\n'
msg = msg + section_end_divider
# print warnings
if 'warnings' in result.keys():
msg = msg + "[Warnings]".center(76, '=') + '\n'
for item in result['warnings']:
msg = msg + item + '\n'
msg = msg + section_end_divider
# print info
if 'info' in result.keys():
msg = msg + "[Informational]".center(76, '=') + '\n'
for item in result['info']:
msg = msg + item + '\n'
msg = msg + section_end_divider
# print logs
if print_logs and 'logs' in result.keys():
if 'problems' in result['logs'].keys():
msg = msg + ('Logs of interest [Problems]:\n')
msg = msg + section_start_divider
for item in result['logs']['problems']:
msg = msg + item + '\n'
msg = msg + section_end_divider
if 'warnings' in result['logs'].keys():
msg = msg + ('Logs of interest [Warnings]:\n')
msg = msg + section_start_divider
for item in result['logs']['warnings']:
msg = msg + item + '\n'
msg = msg + section_end_divider
if 'info' in result['logs'].keys():
msg = msg + ('Logs of interest [Informational]:\n')
msg = msg + section_start_divider
for item in result['logs']['info']:
msg = msg + item + '\n'
msg = msg + section_end_divider
# print frr error codes
if print_suggestions and 'suggestions' in result.keys():
msg = msg + ('Expanded FRR Error Codes:\n')
msg = msg + section_start_divider
for item in result['suggestions']:
msg = msg + item + '\n'
msg = msg + section_start_divider
return msg
def glob_to_numbers(glob):  # noqa
    """
    Given a string containing single numbers and ranges, return a sorted
    list of deduplicated integers.

    glob - A string of digits and ranges

    Returns [] when any token is neither a digit nor a valid low-high
    range (invalid ranges include reversed ones such as "6-4").

    >>> glob_to_numbers('3-4,7,10-12,17,22,4001-4003,7777,8000-8004')
    [3, 4, 7, 10, 11, 12, 17, 22, 4001, 4002, 4003, 7777, 8000, 8001, 8002,
    8003, 8004]
    """
    assert isinstance(glob, (str)), "glob={0}".format(glob)
    # Normalizing commas to spaces before splitting avoids the empty
    # strings a plain split(',') could yield.
    tokens = glob.replace(',', ' ').split()
    found = set()
    range_pattern = re.compile(r"""^(\d+)-(\d+)$""")  # ex. 4-6
    for token in tokens:
        if token.isdigit():
            found.add(int(token))
            continue
        match = range_pattern.match(token)
        if match is None:
            # The substring is neither a digit nor a range.
            print("Globs must consist of numbers or ranges, but {0} is "
                  "neither. We were given glob '{1}'.".format(token, glob))
            return []
        low = int(match.group(1))
        high = int(match.group(2))
        if high < low:
            # Reversed ranges (ex. 6-4) invalidate the whole glob.
            return []
        found.update(range(low, high + 1))
    return sorted(found)  # A sorted list
def ifname_expand_glob(ifname):  # noqa
    """Expand an interface-name glob string into a list of names."""
    if isinstance(ifname, (str)):
        return ifname_expand_glob_helper(ifname, [])
    raise TypeError("This function takes a string and returns a list of "
                    "strings. type(ifname)={0}".format(type(ifname)))
def ifname_expand_glob_helper(ifname, result):  # noqa
    """Recursively expand *ifname* glob components, appending to *result*.

    Consumes one leading component per call (a plain name or a numeric
    range glob such as "swp1-7,9") and recurses on the rest of the
    string. Returns *result* (mutated in place).
    """
    if ifname == '':
        # Base case 1
        return result
    if not ifname_is_glob(ifname):
        # Base case 2: non-globish input
        result.append(ifname)
        return result
    # Get the first glob component. This could be a single name, like "bridge"
    # or it could be a range with commas and hyphens. For example, given
    # "swp1-7,9", get the entire string.
    # Given "swp1-7,9,eth0", get "swp1-7,9,".
    glob = ''
    # Subinterface base and range?  (ex. "swp1.100-102")
    m = (re.match(
        r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?\d+\.)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
        ifname))  # noqa
    if m is None:
        # Non-subinterface base and range?  (ex. "swp1-7,9")
        m = (re.match(
            r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
            ifname))  # noqa
    if m is None:
        m = re.match(r"""(?P<base>\S+?),""", ifname)
        if m is not None:
            # The input begins with a component that doesn't have a range.
            # Ex: lo, bridge, peer-group, Bond-T, server02, etc.
            # glob=None marks the "single name" case for the branch below.
            glob = None
        else:
            raise ValueError("Couldn't parse '{0}'.".format(ifname))
    # Append the expanded substring of interfaces to the result.
    base = m.group('base')
    assert not ifname_is_glob(base), "base = {0}".format(base)
    if glob is None:
        # Append a single interface name to the result.
        result.append(base)
    else:
        # Append a multiple interface names to the result.
        glob = m.group('glob').rstrip(',')
        for number in glob_to_numbers(glob):
            result.append('{0}{1}'.format(base, number))
    # Recurse with the remaining input string.
    return ifname_expand_glob_helper(ifname[len(m.group()):], result)
def ifname_is_glob(ifname):  # noqa
    """Return True when *ifname* looks like an interface-name glob."""
    assert isinstance(ifname, str), "ifname={0}".format(ifname)
    # The empty string and strings containing spaces are never globs.
    if not ifname or ' ' in ifname:
        return False
    # Comma-separated components always form a glob.
    if re.search(r"""\S,\S""", ifname):
        return True
    # A hyphenated numeric span (ex. swp1-4) might be a glob.
    span = re.search(r"""(?<!-)(\d+)-(\d+)(,|$)""", ifname)
    if span is None:
        return False
    low = span.group(1)
    high = span.group(2)
    if len(low) > 1 and low.startswith('0'):
        # Valid ranges do not contain leading zeros.
        return False
    if high.startswith('0'):
        # '0' (or any 0-prefixed number) is not valid as the end range.
        return False
    # A real glob must span upward; equal or reversed bounds are not globs.
    return int(high) > int(low)
def test_check_dependencies(deprecated, satisfied):  # noqa
    """Self-test of check_dependencies(); exits the program on failure."""
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied)
    # A pretend list of modules that have already been satisfied.
    available = ['module1', 'module2']
    # These requirements are fully covered by 'available' and must pass.
    if not check_dependencies('TEST: should_pass',
                              ['module1', 'module2'], available):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Negative")
        exit(1)
    # 'module3' is missing from 'available', so this must fail.
    if check_dependencies('TEST: should_fail',
                          ['module2', 'module3'], available):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Positive")
        exit(1)
    satisfied.append(name)
    return satisfied
def verify_path(path):
    """
    Verify the normalized directory or file path exists.

    :param path: filesystem path, possibly containing '~'
    :return normalized path: absolute, user-expanded path
    """
    normalized = os.path.abspath(os.path.expanduser(path))
    if os.path.exists(normalized):
        return normalized
    # A missing path makes any further parsing pointless, so exit hard.
    logger.error("Filesystem path {} invalid.".format(normalized))
    exit(1)
def wisdom(deprecated, satisfied, info):
    """TE-WISDOM is just a fun little function that adds a one-liner."""
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied, info)
    if not check_dependencies(name, ['find_support_path'], satisfied):
        return(satisfied, info)
    quips = (
        'This CL-SUPPORT Analysis is brought to you by Coors Light... '
        'Taste the Rockies!',
        '# rm -rf / ; reboot - Because its never too late to start again!',
        'Nothing makes a person more productive than the LAST MINUTE!',
        'I had my patience tested. I\'m negative.',
        'Interviewer: "What do you make at your current job?" '
        'ME: "Mostly mistakes!"',
        'Dear Karma, I have a list of people you missed!!!',
        'Don\'t forget to shout "JENGA" when everything falls apart...',
        'Calories: Tiny creatures that live in your closet and sew your '
        'clothes a little tighter every night.',
        'A little bit goes a long way says the Big-Endian...',
        'My backup plan is just my original plan - with more ALCOHOL!',
        'Light travels faster than sound. This is why some people appear '
        'bright until you hear them speak.',
        'Silence is golden. Duct-tape is silver.',
        'If at first, you don\'t succeed, skydiving is not for you!',
        'My imaginary friend says that you need a therapist!',
        'My neighbor\'s diary says that I have boundary issues...',
        'I clapped because it\'s finished, not because I liked it.',
        'What do you mean I\'m not in shape? Round is a shape!',
        'I\'m smiling. That alone should scare you!',
        'Common sense is a flower that doesn\'t grow in everyone\'s garden...',
        'Your trial license for Turbo-Entabulator has expired. Generating '
        'random false-positives.',
    )
    # Pick one quip at random and tack it onto the informational output.
    info.append('TE-WISDOM: {}'.format(random.choice(quips)))
    return(satisfied, info)
| [
"noreply@github.com"
] | xinyli-cumulus.noreply@github.com |
f525a1f530ac0b939164e1ae587b3a12727bf3d3 | e1f78a71c0ce255ab064e0fa9fb3bdb7251bb016 | /src/QuickPaint.py | 7d9e480cf817e89458f9543abf8c54f0d1bd2c03 | [] | no_license | dylansloann/SketchMath | 7675f7e40ef5ae31675c1fa062e2718f41390c07 | 874e624dd3a86a0f879fa54f609115fd393bb1dc | refs/heads/master | 2023-05-30T00:55:00.461682 | 2021-06-13T05:00:44 | 2021-06-13T05:00:44 | 294,305,328 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,745 | py | from PyQt5 import QtCore, QtGui, QtWidgets
import sys, os
class Paint(QtWidgets.QMainWindow):
    """A minimal paint application window.

    Presents a white canvas that the user draws on with the left mouse
    button, plus menus for saving/clearing the canvas and selecting the
    brush color and width.
    """

    def __init__(self):
        super(Paint, self).__init__()
        self.windowSetup()
        self.menuOptionsSetup()
        self.saveCommmandSetup()
        self.eraseCommmandSetup()
        self.colorsSetup()
        self.brushSizeSetup()

    def windowSetup(self):
        """Configure the window, the backing image, and brush defaults."""
        self.setWindowTitle("QuickPaint")
        self.setGeometry(100, 100, 500, 500)
        self.setFixedSize(500, 500)
        self.setWindowIcon(QtGui.QIcon("./icons/painticon.png"))
        # The canvas lives in an off-screen image that paintEvent blits
        # onto the widget; drawing mutates the image, not the widget.
        self.image = QtGui.QImage(self.size(), QtGui.QImage.Format_RGB32)
        self.image.fill(QtCore.Qt.white)
        # default brush
        self.brushSize = 2
        self.drawing = False
        self.brushColor = QtCore.Qt.black
        self.lastPoint = QtCore.QPoint()

    def menuOptionsSetup(self):
        """Create the top-level menus.

        Menus are stored on the instance instead of module-level globals
        (as the original code did) so each window owns its own menus.
        """
        self.main_menu = self.menuBar()
        self.file_menu = self.main_menu.addMenu("File")
        self.color_menu = self.main_menu.addMenu("Color")
        self.size_menu = self.main_menu.addMenu("Size")

    def _addMenuAction(self, menu, icon_path, label, handler, shortcut=None):
        """Create a QAction wired to *handler* and append it to *menu*."""
        action = QtWidgets.QAction(QtGui.QIcon(icon_path), label, self)
        if shortcut is not None:
            action.setShortcut(shortcut)
        action.triggered.connect(handler)
        menu.addAction(action)
        return action

    def saveCommmandSetup(self):
        """Add the Save command to the File menu."""
        # Qt shortcut strings must not contain spaces around '+':
        # "Ctrl + S" does not parse, "Ctrl+S" does.
        self._addMenuAction(self.file_menu, "./icons/saveicon.png", "Save",
                            self.save, shortcut="Ctrl+S")

    def eraseCommmandSetup(self):
        """Add the Erase command to the File menu."""
        self._addMenuAction(self.file_menu, "./icons/brushicon.png", "Erase",
                            self.erase, shortcut="Ctrl+E")

    # designation of menu bar commands
    def save(self):
        """Prompt for a destination and save the canvas image there."""
        file_path = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save Image", "", "PNG;;JPG;;All_Files")
        if file_path[0] == "":
            return  # dialog was cancelled
        self.image.save(file_path[0])

    def erase(self):
        """Clear the whole canvas back to white."""
        self.image.fill(QtCore.Qt.white)
        self.update()

    def colorsSetup(self):
        """Populate the Color menu with the available brush colors."""
        color_actions = (
            ("./icons/blackicon.png", "Black", self.color_black),
            ("./icons/whiteicon.png", "White", self.color_white),
            ("./icons/darkCyanicon.png", "Cyan", self.color_darkCyan),
            ("./icons/darkBlueicon.png", "Blue", self.color_darkBlue),
            ("./icons/darkMagentaicon.png", "Magenta", self.color_darkMagenta),
            ("./icons/darkRedicon.png", "Dark Red", self.color_darkRed),
        )
        for icon_path, label, handler in color_actions:
            self._addMenuAction(self.color_menu, icon_path, label, handler)

    # designation of colors
    def color_black(self):
        self.brushColor = QtCore.Qt.black

    def color_white(self):
        self.brushColor = QtCore.Qt.white

    def color_darkCyan(self):
        self.brushColor = QtCore.Qt.darkCyan

    def color_darkBlue(self):
        self.brushColor = QtCore.Qt.darkBlue

    def color_darkMagenta(self):
        self.brushColor = QtCore.Qt.darkMagenta

    def color_darkRed(self):
        self.brushColor = QtCore.Qt.darkRed

    def brushSizeSetup(self):
        """Populate the Size menu with the available brush widths."""
        size_actions = (
            ("./icons/4icon.png", "4 pixels", self.Brush4),
            ("./icons/8icon.png", "8 pixels", self.Brush8),
            ("./icons/12icon.png", "12 pixels", self.Brush12),
            ("./icons/16icon.png", "16 pixels", self.Brush16),
        )
        for icon_path, label, handler in size_actions:
            self._addMenuAction(self.size_menu, icon_path, label, handler)

    # designation of brush sizes
    def Brush4(self):
        self.brushSize = 4

    def Brush8(self):
        self.brushSize = 8

    def Brush12(self):
        self.brushSize = 12

    def Brush16(self):
        self.brushSize = 16

    # mouse movement and action setup
    def mousePressEvent(self, event):
        """Begin a stroke when the left mouse button is pressed."""
        if event.button() == QtCore.Qt.LeftButton:
            self.drawing = True
            self.lastPoint = event.pos()

    def mouseReleaseEvent(self, event):
        """Finish the stroke when the left mouse button is released."""
        if event.button() == QtCore.Qt.LeftButton:
            self.drawing = False

    def mouseMoveEvent(self, event):
        """Draw a segment from the last point to the cursor while dragging."""
        # Logical 'and' (the original used bitwise '&' with a bool, which
        # only worked because Qt.LeftButton happens to be bit 0x1).
        if (event.buttons() & QtCore.Qt.LeftButton) and self.drawing:
            painter = QtGui.QPainter(self.image)
            painter.setPen(QtGui.QPen(self.brushColor, self.brushSize,
                                      QtCore.Qt.SolidLine, QtCore.Qt.RoundCap,
                                      QtCore.Qt.RoundJoin))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()  # schedule a repaint to show the new segment

    # setup of painter
    def paintEvent(self, event):
        """Blit the backing image onto the visible widget."""
        canvasPainter = QtGui.QPainter(self)
        canvasPainter.drawImage(self.rect(), self.image, self.image.rect())
| [
"dylansloann2@gmail.com"
] | dylansloann2@gmail.com |
b3743862fc7b8de3b6dca5344e37f61f50a634eb | b97a608517f024b81db0bdc4094d143ba87c8af4 | /src/oceandata/export_production/mouw.py | 5922a9fe193338af1b8d507473dce963eb6aaa90 | [
"MIT"
] | permissive | brorfred/oceandata | ff008042cc993a07d9db1de3fa72e70f70d44219 | 831e0691223da1aa6a6e97175e8c2d7874bf60cd | refs/heads/master | 2022-02-14T11:48:13.401206 | 2022-01-27T17:01:56 | 2022-01-27T17:01:56 | 175,451,337 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | """
Global ocean particulate organic carbon flux.
Ref: https://doi.org/10.1594/PANGAEA.855594,
"""
import os, pathlib
import warnings
import pandas as pd
import numpy as np
import requests
DATADIR = pathlib.PurePath(pathlib.Path.home(), ".oceandata")
pathlib.Path(DATADIR).mkdir(parents=True, exist_ok=True)
DATAURL = "https://doi.pangaea.de/10.1594/PANGAEA.855594"
"""
def load():
df = pd.read_hdf("h5files/ep_mouw_with_sat.h5")
df["Zeu"] = 4.6/df.kd490
df["ep_obs"] = df.POC_flux
df["chl"] = df["chl"] * df["Zeu"]
#lh = ecoregions.Longhurst()
#longh = lh.match("regions", lonvec=dfm.lon, latvec=dfm.lat, jdvec=dfm.lat*0)
#dfm["longhurst"] = longh
return df
"""
def load(datadir=DATADIR, filename="GO_flux.tab", with_std=False):
    """Load the GO_flux tab file into a tidy DataFrame.

    Downloads the file from PANGAEA first when it is not already cached
    under *datadir*. Columns are renamed to short aliases and the frame
    is indexed by trap-retrieval time ("end_time").

    :param datadir: directory holding the cached data file
    :param filename: name of the tab-separated PANGAEA file
    :param with_std: keep the std-dev / reference columns when True
    :return: pandas DataFrame indexed by end_time
    """
    path = os.path.join(datadir, filename)
    if not os.path.isfile(path):
        download(datadir=datadir, filename=filename)
    with open(path, "r") as fobj:
        # Skip the PANGAEA metadata header, which ends with a "*/" line.
        line = fobj.readline()
        while "*/" not in line:
            line = fobj.readline()
        df = pd.read_csv(fobj, sep="\t", parse_dates=[1])
    if not with_std:
        # Drop uncertainty and bookkeeping columns unless requested.
        df.drop(columns=['Flux std dev [±]', 'C flux [mg/m**2/day]',
                         'C flux std dev [±]', 'POC flux std dev [±]',
                         'PIC flux std dev [±]', 'PON flux std dev [±]',
                         'POP flux std dev [±]', 'PSi flux std dev [±]',
                         'PAl std dev [±]', 'CaCO3 flux std dev [±]',
                         'Reference'], inplace=True)
    # Map the verbose PANGAEA headers onto short column aliases.
    short_names = {
        'ID (Reference identifier)': "ref_ID",
        'ID (Unique location identifier)': "UUID",
        'Type (Data type)': "sampling_type",
        'Latitude': "lat",
        'Longitude': "lon",
        'Flux tot [mg/m**2/day]': "tot_flux",
        'POC flux [mg/m**2/day]': "POC_flux",
        'PIC flux [mg/m**2/day]': "PIC_flux",
        'PON flux [mg/m**2/day]': "PON_flux",
        'POP flux [mg/m**2/day]': "POP_flux",
        'PSi flux [mg/m**2/day]': "PSi_flux",
        'PSiO2 flux [mg/m**2/day]': "PSiO2_flux",
        'PSi(OH)4 flux [mg/m**2/day]': "PSiOH4_flux",
        'PAl [mg/m**2/day]': "PAl_flux",
        'Chl flux [mg/m**2/day]': "Chl_flux",
        'Pheop flux [µg/m**2/day]': "Pheop_flux",
        'CaCO3 flux [mg/m**2/day]': "CaCO3_flux",
        'Fe flux [mg/m**2/day]': "Fe_flux",
        'Mn flux [µg/m**2/day]': "Mn_flux",
        'Ba flux [µg/m**2/day]': "Ba_flux",
        'Detrital flux [mg/m**2/day]': "Detr_flux",
        'Ti flux [µg/m**2/day]': "Ti_flux",
        'Bathy depth [m] (ETOPO1 bathymetry)': "bathy",
        'Depth water [m] (Sediment trap deployment depth)': "depth",
        'Area [m**2]': "area",
        'Duration [days]': "duration",
        'Date/Time (Deployed)': "start_time",
        'Date/time end (Retrieved)': "end_time",
        'Area [m**2] (Surface area of trap)': "trap_area",
    }
    df.rename(columns=short_names, inplace=True)
    df.drop(columns=['Type (Sediment trap type)',
                     'Elevation [m a.s.l.] (Total water depth)'],
            inplace=True)
    # Ensure proper datetime dtypes before indexing on retrieval time.
    df["start_time"] = pd.DatetimeIndex(df["start_time"])
    df["end_time"] = pd.DatetimeIndex(df["end_time"])
    df.set_index("end_time", inplace=True)
    return df
def download(datadir=DATADIR, filename="GO_flux.tab"):
    """Download txt file from BATS server

    Fetches the GO_flux tab file from the PANGAEA server and stores it
    under *datadir*. Returns False on timeout; raises IOError on any
    other HTTP failure.

    Refs
    ----
    """
    local_filename = os.path.join(datadir, filename)
    # Remove any stale cached copy first; a missing file is fine.
    try:
        os.unlink(local_filename)
    except FileNotFoundError:
        pass
    try:
        response = requests.get(DATAURL, stream=True, timeout=6,
                                params={"format": "textfile"})
    except requests.ReadTimeout:
        warnings.warn("Connection to server timed out.")
        return False
    if not response.ok:
        raise IOError(f"Could not download file from server, "
                      f"Error {response.status_code}")
    if local_filename is None:
        # Unreachable with the current signature (os.path.join never
        # returns None); kept for behavioral parity.
        return response.text
    with open(local_filename, 'wb') as fobj:
        # Stream to disk in 1 KiB chunks to keep memory usage flat.
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                fobj.write(chunk)
                fobj.flush()
| [
"brorfred@gmail.com"
] | brorfred@gmail.com |
7b330a04a2dde22bdff089a6ed4a3ec386cbc41c | a78fa01825c57797d45d57f7e7143ef91024aa1e | /db_tools/import_category_data.py | 970b412ad79f732fb880182e770e6ca7a2c8a308 | [] | no_license | giwatest/MxShop | 8e68fb917d7ccc3f6cca24bc654cb1868f0c1409 | 4500eb6d4c85110ed3c97209c007be35ceb1cd6b | refs/heads/master | 2020-07-19T01:53:49.427411 | 2020-02-23T14:58:34 | 2020-02-23T14:58:34 | 206,355,008 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # encoding: utf-8
__author__ = 'GIWA'
#批量导入商品类目
#
#
import sys
import os
pwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(pwd+'../')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MxShop.settings")
import django
django.setup()
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
for lev1_cat in row_data:
lev1_instance = GoodsCategory()
lev1_instance.name = lev1_cat['name']
lev1_instance.code = lev1_cat['code']
lev1_instance.category_type = 1
lev1_instance.save()
for lev2_cat in lev1_cat['sub_categorys']:
lev2_instance = GoodsCategory()
lev2_instance.name = lev2_cat['name']
lev2_instance.code = lev2_cat['code']
lev2_instance.category_type = 2
lev2_instance.parent_category = lev1_instance
lev2_instance.save()
for lev3_cat in lev2_cat['sub_categorys']:
lev3_instance = GoodsCategory()
lev3_instance.name = lev3_cat['name']
lev3_instance.code = lev3_cat['code']
lev3_instance.category_type = 3
lev3_instance.parent_category = lev2_instance
lev3_instance.save() | [
"bingna.liu@xinchan.com"
] | bingna.liu@xinchan.com |
c257da7a0180dbf630338ad35acd1a55e212f6fa | 703aa4509109552e91e1f3db39146f723b6256d0 | /motores.py | 5f695b4f176b91edc5fdb5ca8ec9aa0d10a29a45 | [] | no_license | alfredobs97/PythonMysqlScript | 2334fb0392b8039862e77c382d7a23b4763bc8ed | 91bcaab69ee4f518697cc3dd15e7bf43bab4465c | refs/heads/master | 2021-01-13T15:47:43.093460 | 2017-02-09T16:59:01 | 2017-02-09T16:59:01 | 79,963,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #!/usr/bin/python
import MySQLdb
db = MySQLdb.connect(host="192.168.8.16",
user="root",
passwd="1234",
db="mysql")
cur = db.cursor()
cur.execute("SHOW ENGINES")
ver = cur.fetchall()
print "Version de Mysql : %s" %ver
| [
"alfredobautista1@gmail.com"
] | alfredobautista1@gmail.com |
569977b9ce4461b125524e9caad267bb700d509d | 1a3b527145549c7d69f42831ea12c468e1ebb209 | /math.py | ad4498fd08e2a47d2f83cef8a60021d5b965e988 | [] | no_license | muhammadagus030201/finalproject | 7654094d0549122fb14a04441cb606bc3208f972 | 3b3fe4a6e13d94ff217f61e6e8bd5aebec678ddd | refs/heads/main | 2023-03-21T12:23:22.102040 | 2021-03-06T16:11:18 | 2021-03-06T16:11:18 | 345,130,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | x = 10
y = 3
#tambah
z1 = x+y
print("Hasil Pertambahan {} + {} = {}".format(x,y,z1))
#bagi
z2 = x/y
print("Hasil Pembagian {} / {} = {}".format(x,y,z2))
#moduloatausisabagi
z3 = x%y
print("Hasil Modulo {} % {} = {}".format(x,y,z3))
#pangkat
z4 = x**y
print("Hasil Pangkat {} ** {} = {}".format(x,y,z4)) | [
"belajarpython030201@gmail.com"
] | belajarpython030201@gmail.com |
670e94a3bedc2fa474c3d44db8f5ae1bab732516 | f3515dd05089b6491ccb4c28ac6654b7f86e77b0 | /hw_4/final_submittal/cross_validation_script.py | 11b87a12e232f801086c9ba118854eadc8e211eb | [] | no_license | jb08/AI | f17409832c0af8710957f4b4d6c80d90aa06f198 | 8f05d6989174b3c6b76547c5370042038a54e78d | refs/heads/master | 2021-01-01T04:12:10.496120 | 2016-06-09T16:40:58 | 2016-06-09T16:40:58 | 56,189,015 | 0 | 0 | null | 2016-06-08T17:06:55 | 2016-04-13T22:01:04 | Python | UTF-8 | Python | false | false | 5,263 | py | # Name: Megan Sinclair, David Williams, Jason Brown
# Date: 5/23/16
# All group members were present and contributing during all work on this project
#
# Note that there is a retrain function in this script. This function mirrors the training
# that is present in our bayes.py and bayesbest.py files. Mirroring it here was simply
# done to make our cross-validation easier, but bayesbest.py is still intended to be used
# by itself.
import bayes
import bayesbest
import os, time
def ten_fold():
pos_true = 0
pos_false = 0
neg_true = 0
neg_false = 0
best_pos_true = 0
best_pos_false = 0
best_neg_true = 0
best_neg_false = 0
bc = bayes.Bayes_Classifier()
bcc = bayesbest.Bayes_Classifier()
for i in range(10):
training,testing = single_fold(i)
retrain(bc,training, False)
retrain(bcc,training, True)
#print "\tDone training"
#print len(testing)
#print len(training)
#time.sleep(3)
ct = 1
for f in testing:
sTxt = bc.loadFile("movies_reviews/" + f)
bc_result = bc.classify(sTxt)
bcc_result = bcc.classify(sTxt)
#print "\tTested: " ,ct
ct += 1
if (f.startswith("movies-5")):
if bc_result == "positive":
pos_true += 1
else:
pos_false += 1
if bcc_result == "positive":
best_pos_true += 1
else:
best_pos_false += 1
elif (f.startswith("movies-1")):
if bc_result == "negative":
neg_true += 1
else:
neg_false += 1
if bcc_result == "negative":
best_neg_true += 1
else:
best_neg_false += 1
print "fold: ", i
print "\treg results: %d %d %d %d" % (pos_true, pos_false, neg_true, neg_false)
print "\tbest results: %d %d %d %d" % (best_pos_true, best_pos_false, best_neg_true, best_neg_false)
#precision
precision_positive = pos_true / float(pos_true + pos_false)
precision_negative = neg_true / float(neg_true + neg_false)
best_precision_positive = best_pos_true / float(best_pos_true + best_pos_false)
best_precision_negative = best_neg_true / float(best_neg_true + best_neg_false)
#recall
recall_positive = pos_true / float(pos_true + neg_false)
recall_negative = neg_true / float(neg_true + pos_false)
best_recall_positive = best_pos_true / float(best_pos_true + best_neg_false)
best_recall_negative = best_neg_true / float(best_neg_true + best_pos_false)
#f-measure
f_measure_positive = (2 * precision_positive * recall_positive) / float(precision_positive + recall_positive)
f_measure_negative = (2 * precision_negative * recall_negative) / float(precision_negative + recall_negative)
best_f_measure_positive = (2 * best_precision_positive * best_recall_positive) / float(best_precision_positive + best_recall_positive)
best_f_measure_negative = (2 * best_precision_negative * best_recall_negative) / float(best_precision_negative + best_recall_negative)
print "naive bayes classifier:"
print " precision_positive: %.3f" % precision_positive
print " precision_negative: %.3f"% precision_negative
print " recall_positive: %.3f" %recall_positive
print " recall_negative: %.3f" %recall_negative
print " f_measure_positive: %.3f" %f_measure_positive
print " f_measure_negative: %.3f" %f_measure_negative
print " "
print "naive bayes classifier (improved):"
print " precision_positive: %.3f" %best_precision_positive
print " precision_negative: %.3f" %best_precision_negative
print " recall_positive: %.3f" %best_recall_positive
print " recall_negative: %.3f" %best_recall_negative
print " f_measure_positive: %.3f" %best_f_measure_positive
print " f_measure_negative: %.3f" %best_f_measure_negative
def single_fold(start_val):
count = start_val%10 #10 fold validation
IFileList = []
for fFileObj in os.walk("movies_reviews/"):
IFileList = fFileObj[2]
break
training_set = []
testing_set = []
for f in IFileList:
#Training set
if(count == 9):
#print "count was: ", count, " ; append to testing_set"
testing_set.append(f)
count = 0
else:
#print "count was: ", count, " ; append to training_set"
training_set.append(f)
count+=1
return training_set,testing_set
def retrain(bc, training_set, is_best):
#For each file name, parse and determine if pos (5) or neg (1)
bc.positive = dict()
bc.negative = dict()
for f in training_set:
#Positive review, add words/frequencies to positive dictionary
if (f.startswith("movies-5")):
bc.dictionary = bc.positive
#Negative review, add words/frequencies to negative dictionary
elif (f.startswith("movies-1")):
bc.dictionary = bc.negative
else:
#print "error: file didn't start with movies-1 or movies-5"
continue
sTxt = bc.loadFile("movies_reviews/" + f)
token_list = bc.tokenize(sTxt)
#print "dictionary: ", dictionary
for word in token_list:
if (is_best):
word = word.lower()
#If word exists in dictionary already, increase frequency by 1
if word in bc.dictionary:
bc.dictionary[word] +=1
#Add word to dictionary with frequency of 1 if it did not already exist
else:
bc.dictionary[word] = 1 | [
"jasonkingsley.brown@gmail.com"
] | jasonkingsley.brown@gmail.com |
b9ac3eaf94bdd09fd0832248e58d306bcfe3a66b | d8f44692c9f9f0a9a391a49db0f4f659a2ef6fe8 | /jsBuilds/jsBuilder.py | c6d54a0f8bcd6b1b48141c786117bd378dc21f5d | [
"MIT"
] | permissive | skylarkgit/sql2phpclass | 045e71963574b719313fc98882f5c710435f101f | a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6 | refs/heads/master | 2020-03-19T02:34:34.229287 | 2018-07-04T18:58:28 | 2018-07-04T18:58:28 | 135,640,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | import sys
sys.path.append('..')
from jsBuilds.jsTemplates import *
from jsBuilds.jsSupport import *
from lib.fileOps import *
from dtfSupport import *
import jsbeautifier
tables=None
DEPENDENCIES="$scope,archonAPI,ToolBag,$http,$window,$filter,$mdDialog"
def getSelectServices(tableSurface):
    """Build the $scope select-population JS for a table's foreign keys.

    One-to-many foreign keys each get an archon "select" call that fills
    ``<alias>Select`` on the scope; one-to-one keys recurse into the
    referenced table so its selects are generated too.
    """
    tableName = tableSurface.alias
    fragments = []
    for fk in tableSurface.getForiegnOTMKeys().values():
        fragments.append(ARCHONCALL(
            '"select"',
            "'" + tables[fk.keyReference].alias + "'",
            '""',
            POSTSUBMISSION(
                SCOPE(fk.alias + "Select") + '=response.data.data;',
                ONFAILURE('"COULDN\'t FETCH DATA FROM ' + tableName +
                          ' : "+response.data.data'))))
    for fk in tableSurface.getForiegnOTOKeys().values():
        fragments.append(getSelectServices(tables[fk.keyReference]))
    return "".join(fragments)
def getShowService(tableSurface):
    """Build the archon "Get" call that loads ``<alias>Data`` on the scope."""
    tableName = tableSurface.alias
    tableSurface.getForiegnOTMKeys()  # result unused; kept for parity
    on_success = SCOPE(tableName + "Data") + '=response.data.data;'
    on_failure = ONFAILURE('"COULDN\'t FETCH DATA FROM ' + tableName +
                           ' : "+response.data.data')
    return ARCHONCALL('"Get"', "'" + tableName + "'", '""',
                      POSTSUBMISSION(on_success, on_failure))
def setTables(tableSurfaces):
    """Register the table-surface mapping used by the builder functions.

    Stores *tableSurfaces* (a mapping of table names to surface objects
    -- presumably keyed by table name; verify against callers) in the
    module-level ``tables`` global that the generator functions read.
    """
    global tables
    tables=tableSurfaces
def getSubmission(tableSurface):
    """Build the submit() handler JS that posts an "add" for this table."""
    settables = getAllSettables(tables, tableSurface, {})
    print(",".join(settables))  # debug trace of the collected field names
    call_args = CALL('ToolBag.objToCallArgs', createObjFromScope(settables))
    outcome = POSTSUBMISSION(ONSUCCESS('"Data Saved"'),
                             ONFAILURE('response.data.data'))
    return SUBMISSION('"add"', call_args,
                      "'" + tableSurface.alias + "'", outcome)
def getUpdation(tableSurface):
    """Build the submit() handler JS that posts an "update" for this table."""
    all_vars = getAllVars(tables, tableSurface, {})
    print(",".join(all_vars))  # debug trace of the collected field names
    call_args = CALL('ToolBag.objToCallArgs', createObjFromScope(all_vars))
    outcome = POSTSUBMISSION(ONSUCCESS('"Data Saved"'),
                             ONFAILURE('response.data.data'))
    return SUBMISSION('"update"', call_args,
                      "'" + tableSurface.alias + "'", outcome)
def getFetchById(tableSurface,obj,code):
    """Emit a JS "fetch" call that loads one row keyed by *obj* and runs
    *code* on success.

    NOTE(review): the incoming ``code`` argument is overwritten on the first
    line, so only the success-handler semantics of the parameter survive —
    confirm callers rely on that before changing the signature.
    """
    #code="var obj={"+createObjFromScope(tableSurface.getSettable())+"};\n"
    code=ARCHONCALL("'fetch'","'"+tableSurface.alias+"'",CALL('ToolBag.objToCallArgs',createObjFromScope(obj)),POSTSUBMISSION(code,ONFAILURE('response.data.data')))
    return code
def createAddController(tableSurface):
    """Generate the full AngularJS "add<Table>" controller: form validity,
    foreign-key select loaders and the add-submission handler."""
    tableName=tableSurface.alias
    varList=tableSurface.getSettable()
    # Expose form validity and the shared showAdvanced toggle on $scope.
    code=SCOPE(VALIDITY(SCOPE('add'+tableName+'Controller')))
    code+=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getSelectServices(tableSurface)
    code+=getSubmission(tableSurface)
    return OBJ('app',CONTROLLER(CONTROLLERNAME('add',tableName),DEPENDENCIES,code))
def buildShowController(tableSurface):
    """Generate the AngularJS "show<Table>" controller that lists all rows."""
    tableName=tableSurface.alias
    code=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getShowService(tableSurface)
    return OBJ('app',CONTROLLER(CONTROLLERNAME('show',tableName),DEPENDENCIES,code))
def buildUpdateController(tables,tableSurface):
    """Generate the AngularJS "update<Table>" controller.

    The controller receives the row's key values as injected arguments,
    fetches the current row into $scope, and wires the update submission.
    Note: the ``tables`` parameter shadows the module-level global of the
    same name.
    """
    tableName=tableSurface.alias
    varList=tableSurface.getSettable()
    keys=tableSurface.getKeys()
    # Copy the injected key arguments onto $scope, then prefill the form
    # with the fetched row.
    code=argsToScope(keys)
    code+=getFetchById(tableSurface,keys,responseToScope(getAllVars(tables,tableSurface,{})))
    code+=SCOPE(VALIDITY(SCOPE('update'+tableName+'Controller')))
    code+=SCOPE('showAdvanced')+'=ToolBag.showAdvanced;\n'
    code+=getSelectServices(tableSurface)
    code+=getUpdation(tableSurface)
    return OBJ('app',CONTROLLER(CONTROLLERNAME('update',tableName),DEPENDENCIES+","+",".join(keys),code))
def buildControllers(tableSurfaces):
    """Generate add/show/update controllers for every table and write the
    beautified result to js\\controllers.js.

    Also builds a ``controllerProvider`` switch that maps a controller name
    to its function, prepended to the output file.

    Fix: the output file is now closed deterministically via a context
    manager — the original opened it and never closed it, so on interpreter
    implementations without refcounting the write could be lost.
    """
    global tables
    tables=tableSurfaces
    code=""
    pc=""
    touchd('js')
    # One pass per controller kind; each pass appends a dispatch case.
    for t in tables.values():
        code+=createAddController(t)
        pc+=CASE("'"+CONTROLLERNAME('add',t.alias)+"'",'return '+CONTROLLERNAME('add',t.alias)+';')
    for t in tables.values():
        code+=buildShowController(t)
        pc+=CASE("'"+CONTROLLERNAME('show',t.alias)+"'",'return '+CONTROLLERNAME('show',t.alias)+';')
    for t in tables.values():
        code+=buildUpdateController(tables,t)
        pc+=CASE("'"+CONTROLLERNAME('update',t.alias)+"'",'return '+CONTROLLERNAME('update',t.alias)+';')
    pc=SWITCH('ctrl',pc)
    pc='obj.controllerProvider=function(ctrl){{{code}}}'.format(code=pc)
    with open('js\controllers.js','w') as f:
        f.write(jsbeautifier.beautify(pc+code))
| [
"abhay199658@gmail.com"
] | abhay199658@gmail.com |
bda1259acf1f9e58440de1958bf26bb65f5b568f | 144590772aaa89e5ead8936512b0bd035c215c7b | /resilient-circuits/tests/selftest_tests/mocked_success_script.py | 3ea0cb2eea6c0d23aebd1c0b1c2c4e1b72008c59 | [
"MIT"
] | permissive | ibmresilient/resilient-python-api | f65dad3f3c832581127026fa3e626eaf3d4749a7 | 84e8c6d9140ceac0bf47ce0b98e11c7953d95e61 | refs/heads/main | 2023-07-23T12:36:49.551506 | 2023-07-11T15:15:43 | 2023-07-11T15:15:43 | 101,414,862 | 37 | 31 | MIT | 2023-09-07T14:00:34 | 2017-08-25T14:59:45 | Python | UTF-8 | Python | false | false | 276 | py |
def selftest(opts):
    """Placeholder selftest hook (e.g. for probing package API connectivity).

    Always reports success; suggested states are "unimplemented", "success"
    or "failure". *opts* is accepted but currently unused.
    """
    result = {"state": "success"}
    result["reason"] = None
    return result
"Ryan.Gordon1@ibm.com"
] | Ryan.Gordon1@ibm.com |
4b23d796e4e6e0eaf5c71897207ec14d8b6168c5 | 5dc20d163ac874bef45f8aeadbdc8bef1697ea64 | /python-asyncio/src/asyncio/basic.py | bdbc3208817df584096fd5999af8abb797074038 | [] | no_license | bartfrenk/sandbox | 67241860ea35437a0f032a1b656a63908289fe19 | 563fc0051e742cc735c5da4b58a66ccf926e2b16 | refs/heads/master | 2022-12-10T04:51:56.396228 | 2020-02-23T11:18:28 | 2020-02-23T11:18:28 | 60,007,554 | 0 | 0 | null | 2016-05-30T12:12:09 | 2016-05-30T12:12:09 | null | UTF-8 | Python | false | false | 427 | py | import asyncio
import sys
class Test:
def __init__(self, number):
self.number = number
async def run(self):
print("The magic number is...", end=" ")
sys.stdout.flush()
await asyncio.sleep(1)
print(self.number)
async def main():
    """Print two lines separated by a one-second asynchronous pause."""
    print("Hello")
    await asyncio.sleep(1)
    print("... World!")
if __name__ == "__main__":
    # Demo entry point: construct a Test and drive its coroutine to completion.
    test = Test(5)
    asyncio.run(test.run())
"bart.frenk@gmail.com"
] | bart.frenk@gmail.com |
07e30b5ca44e0780d580e0e6e6bb3d6b3d5b027e | 031b1c5b0c404f23ccd61a08845695bd4c3827f2 | /python/pyfiles/算术运算符.py | 39efec4aa582072f142c44bd1bc23d687686d1e0 | [] | no_license | AndyFlower/zixin | c8d957fd8b1e6ca0e1ae63389bc8151ab93dbb55 | 647705e5f14fae96f82d334ba1eb8a534735bfd9 | refs/heads/master | 2022-12-23T21:10:44.872371 | 2021-02-10T07:15:21 | 2021-02-10T07:15:21 | 232,578,547 | 1 | 0 | null | 2022-12-16T15:41:14 | 2020-01-08T14:13:25 | Java | UTF-8 | Python | false | false | 795 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 22:57:02 2020
@author: sanglp
"""
# The + operator: numeric addition and sequence concatenation
print(3+5)
print(3.4+4.5)
print((3+4j)+(4+5j))
print('abc'+'def')
print([1,2]+[3,4])
print((1,2)+(3,))
# The - operator: subtraction, unary negation, set difference
print(7.9 -4.5) # floating-point result carries representation error
print(5-3)
num = 3
print(-num)
print(--num)
print({1,2,3}-{3,4,5}) # set difference
# The * operator: multiplication and sequence repetition
print(3333*5555)
print((3+4j)*(5+6j))
print('重要的事情说3遍'*3)
print([0]*5)
print((0,)*3)
# The / and // operators: true division vs. floor division
print(17 / 4)
print(17 // 4) #4
print((-17) / 4)
print((-17) // 4) #-5 (floor division rounds toward negative infinity)
# The % operator: modulo, and old-style string formatting
print(365 %7)
print(365 %2)
print('%c,%c,%c' %(65,97,48)) # format numbers as characters -> A,a,0
# The ** operator: exponentiation (right-associative)
print(2 ** 4)
print(3 ** 3 ** 3)
print(3 ** (3**3))
print((3**3)**3)
print(9**0.5)
print((-1)**0.5) # square root of a negative number yields a complex result
"1308445442@qq.com"
] | 1308445442@qq.com |
132c053eb5afe2d84aa47b7ec1f8974eb06f8dce | f34ed25e140a1e9f09d1fb4253674b317b989125 | /NURB/manage.py | 52c8c2eebf023aad825462247ee388c57a0c342b | [] | no_license | westonpace/NUR | 925ae3e01a5315292d3cb96d98603dd77182acec | 01b8e657583c549afda0e11abb9b9fb8712147eb | refs/heads/master | 2021-01-22T11:51:14.011919 | 2013-05-22T03:09:01 | 2013-05-22T03:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point: point Django at this project's
    # settings module, then delegate to the CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NURB.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"weston.pace@gmail.com"
] | weston.pace@gmail.com |
0cd9df6b49c7bef9b37f49d71c4534f94c55be94 | ccf06f8f91a1068fc12edffb379d35dbe4f6388e | /getReplyIds.py | 8c510d9a54bfce6441aacd116135fea88e804c7c | [
"Apache-2.0"
] | permissive | online-behaviour/machine-learning | 46c37c5e37de4323c778bc2ffc80024f4e34a004 | 2ff0e83905985ec644699ece44c75dd7422a7426 | refs/heads/master | 2021-07-09T04:36:48.441324 | 2021-04-28T13:58:00 | 2021-04-28T13:58:00 | 87,834,727 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/python3 -W all
# getReplyIds.py: extract ids and reply-ids from tweets in json
# usage: getReplyIds.py < file
# 20170918 erikt(at)xs4all.nl
import csv
import json
import re
import sys
# Field names used in each tweet's JSON record.
COMMAND = sys.argv.pop(0)
ID = "id"
REPLYTO = "in_reply_to_status_id"
SCREENNAME = "screen_name"
TEXT = "text"
USER = "user"

# Compiled once: the original recompiled this pattern for every input line.
NEWLINE_RE = re.compile("\n")

outFile = csv.writer(sys.stdout)
for line in sys.stdin:
    jsonLine = json.loads(line)
    # Abort on any record that lacks one of the fields we emit.
    if ID not in jsonLine or REPLYTO not in jsonLine or TEXT not in jsonLine or\
       USER not in jsonLine or SCREENNAME not in jsonLine[USER]:
        sys.exit(COMMAND+": unexpected line: "+line)
    # Flatten embedded newlines so each tweet stays on a single CSV row.
    jsonLine[TEXT] = NEWLINE_RE.sub(" ", jsonLine[TEXT])
    outFile.writerow([str(jsonLine[ID]),str(jsonLine[REPLYTO]),\
                      str(jsonLine[USER][SCREENNAME]),"PARTY",
                      str(jsonLine[TEXT])])
| [
"erikt@xs4all.nl"
] | erikt@xs4all.nl |
f91e0e107d8ae9b3d2e01fcae49d69d459b91219 | 4ac3571fed09a6f475448ce7555abfe0daf00151 | /lettercount.py | 562fd93c1e54f46ef9fca3b9d017397542448363 | [] | no_license | ThorHlavaty/pythondicts | fb21ffd490ae2c0faa6ea43abcda0cead3f7ba97 | d7bc47a28ce1d04b212da277e05a5c624dc1feec | refs/heads/master | 2022-12-12T08:24:39.300182 | 2020-09-04T18:15:46 | 2020-09-04T18:15:46 | 290,031,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | def letter_counter(a_string):
letter_count = {}
for letter in a_string:
letter_count[letter] = a_string.count(letter)
return letter_count
def word_histogram(a_string):
    """Return a dict mapping each lower-cased word to its occurrence count.

    Bug fix: the original ended with ``return word_count and this_thing``,
    which — since a non-empty dict is truthy — returned the token *list*
    instead of the histogram for every non-empty input. It now returns the
    histogram, as the function name promises.
    """
    words = a_string.lower().split()
    word_count = {}
    for word in words:
        word_count[word] = word_count.get(word, 0) + 1
    return word_count
def histogram_rank(a_string):
    """Print the three most frequent lower-cased words of *a_string*.

    Raises IndexError when the input has fewer than three distinct words
    (the ``[-3]`` index below has nothing to address).
    """
    word_count = {}
    this_thing = a_string.lower().split()
    for word in this_thing:
        word_count[word] = this_thing.count(word)
    # Ascending sort by count, so the largest counts sit at the end.
    word_count_sorted = sorted(word_count.items(), key=lambda x: x[1])
    print(f'The top three words are:\n{word_count_sorted[-1]}\n{word_count_sorted[-2]}\n{word_count_sorted[-3]}')
print(f'The top three words are:\n{word_count_sorted[-1]}\n{word_count_sorted[-2]}\n{word_count_sorted[-3]}')
# Demo / smoke-test calls.
print(letter_counter("Bananas"))
print(word_histogram("To be or not to be"))
histogram_rank("to be or not to be or to be or maybe even to not to be lol")
| [
"thorthebore@gmail.com"
] | thorthebore@gmail.com |
88ed4535cc1d89f37f97af16d48dceabab6add6f | 1e39bbec23e4200d84237cb2446e4285736cbf98 | /options.py | b17459e6a9edea456a043196dec7c461421c41c3 | [] | no_license | JRiyaz/password-manager | 00617c4f16f7438c392baf972d66d77eca11e519 | 215947d5ce5934bd04d11f3cf1d035cf457a5fa9 | refs/heads/main | 2023-05-31T12:05:06.772659 | 2021-07-05T09:54:44 | 2021-07-05T09:54:44 | 379,824,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,787 | py | import sys
from colors import Colors
from security import PasswordManager
class Options:
    """Command-line menu driver for the password manager.

    All state is kept at class level (single-user tool); every prompt goes
    through input()/print() with ANSI colors from ``Colors``.
    """
    # username of the user
    __username = ''
    # Show welcome message for first time
    __welcome = True
    @staticmethod
    def __ask_username() -> None:
        """ This method prompt the user to enter username through command line """
        print(f"{Colors.CYAN}Please register here{Colors.END}")
        Options.__username = input("Please enter username: ")
    @staticmethod
    def __ask_password() -> None:
        """ This method prompt the user to enter password through command line """
        pm = PasswordManager()
        # NOTE(review): ``secure`` is never set to True, so the first prompt
        # variant is always used — confirm whether escalation was intended.
        secure = False
        while True:
            msg = secure is True and 'Enter new strong password' or 'Enter your new password'
            password = input(f"{msg}: ")
            # set_password return codes: -1 too short, 1 weaker than old,
            # 2 already used, anything else = accepted.
            set_password = pm.set_password(password)
            if set_password == -1:
                print(f'{Colors.WARNING}Password must contain at-lease 6 characters{Colors.END}')
            elif set_password == 1:
                print(f'{Colors.WARNING}New password must be secured than your old password{Colors.END}')
            elif set_password == 2:
                print(f'{Colors.WARNING}Password already exists, it cannot be set.{Colors.END}')
            else:
                message = Options.__welcome and 'Your password is set' or 'Password has changed'
                print(f'{Colors.GREEN}{message}{Colors.END}')
                break
    @staticmethod
    def __show_options() -> int:
        """
        This method continuously prompt the user to select from given option
        until the user selects the correct option through command line
        """
        wrong = False
        selection = 0
        while True:
            if not wrong:
                print(f'{Colors.BLUE}NOTE: please select from following options{Colors.END}')
            else:
                print(f'{Colors.WARNING}please select correct option{Colors.END}')
            options = (
                '1. Show all my passwords\n'
                '2. Get current password\n'
                '3. Set new password\n'
                '4. Security level of my current password\n'
                '5. Logout\n')
            try:
                selection = int(input(options))
            except ValueError as e:
                # Non-numeric input: re-prompt with the warning banner.
                wrong = True
                continue
            else:
                if selection in [1, 2, 3, 4, 5]:
                    break
                else:
                    wrong = True
                    continue
        return selection
    @classmethod
    def check_password(cls) -> bool:
        """
        This method continuously prompt the user to enter current password to perform the selected
        action for 3. If you enter wrong password for 3rd time program will terminate
        """
        pm = PasswordManager()
        pwd = input(f'{cls.__username.title()} Enter your current password to perform the action: ')
        chances = 0
        while True:
            if pm.is_correct(pwd):
                return True
            else:
                # Terminates the whole program once the retries are used up.
                if chances > 2:
                    sys.exit('\nYour account is blocked')
                print(f'{Colors.WARNING}You have entered wrong password{Colors.END}')
                pwd = input(f'{Colors.FAIL}You have {3 - chances} attempts left. Please try again: {Colors.END}')
                chances += 1
    @classmethod
    def main_menu(cls) -> None:
        """ This method prompt the user to select correct options through command line """
        pm = PasswordManager()
        # First run: register a username and an initial password.
        if not cls.__username:
            cls.__ask_username()
        if not pm.get_password():
            print(f'{Colors.WARNING}You have not set password for you account{Colors.END}')
            cls.__ask_password()
        if cls.__welcome:
            print(f'\n{Colors.GREEN}', 10 * '*', 'Welcome to Password Manager', 10 * '*', f'{Colors.END}\n')
            cls.__welcome = False
        # Menu loop: every sensitive action re-authenticates first.
        while True:
            selection = cls.__show_options()
            if selection == 1 and cls.check_password():
                print(f'{Colors.CYAN}{pm.get_all_passwords()}{Colors.END}')
            elif selection == 2 and cls.check_password():
                print(f'{Colors.CYAN}Your current password: {pm.get_password()}{Colors.END}')
            elif selection == 3 and cls.check_password():
                cls.__ask_password()
            elif selection == 4 and cls.check_password():
                # Level codes: 0 weak, 1 strong, anything else very strong.
                level = pm.get_level()
                strength = level == 0 and 'WEAK' or level == 1 and 'STRONG' or 'VERY STRONG'
                print(f'{Colors.CYAN}Your password is: {strength}{Colors.END}')
            elif selection == 5:
                sys.exit('\nYou are logged out')
| [
"j.riyazu@gmail.com"
] | j.riyazu@gmail.com |
c69507f367aefa1127fc150a6f8ecc701ddc571a | 6aed964b224292fb1d76f9b5dacb0883abe929fc | /ablog/theblog/migrations/0006_auto_20200927_2319.py | 2a457ec8fc67c8216b10e0e78141980f6a740de7 | [] | no_license | satish-313/OurBlog | e575585c4e0960a552628164f1cd8dee7d99a0c5 | ce497682ba1f6be725be7cdb1d1e02241059843f | refs/heads/master | 2023-02-09T22:33:47.816592 | 2020-12-22T16:22:18 | 2020-12-22T16:22:18 | 319,668,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # Generated by Django 3.1.1 on 2020-09-27 17:49
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches Post.body to a CKEditor
    # RichTextField that allows blank/null values.
    dependencies = [
        ('theblog', '0005_auto_20200926_2239'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='body',
            field=ckeditor.fields.RichTextField(blank=True, null=True),
        ),
    ]
| [
"pradhansatish53@gmail.com"
] | pradhansatish53@gmail.com |
272977aa9883b7e270a1e4aa51d6f4540f0c7ef8 | ada39040fa1e56fb7de6147ff62e6c8dee1f69bb | /Backend.py | 9d49833595073ea3e4fab7d29b580bc74cec35ea | [] | no_license | hendpraz/chatbot-pattern-matching | c18880fde6df9663768ee482c233e5804823b756 | 478c873f1e53398c6f37919cda9e0f77a0194a88 | refs/heads/master | 2020-05-09T21:09:52.091634 | 2019-04-25T01:13:50 | 2019-04-25T01:13:50 | 181,433,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,956 | py | #!/usr/bin/python
import sys
import re
from utils import stopwords, listSynonym, FAQs
#from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
#from ntlk.corpus import stopwords
#from ntlk.tokenize import word_tokenize
# Parallel question/answer stores filled by initDB(); numOfQuestion tracks
# their common length.
numOfQuestion = 0
questionDB = []
answerDB = []
# Earlier Sastrawi-based stopword setup, now replaced by utils.stopwords:
#factory = StopWordRemoverFactory()
#stopwords = factory.get_stop_words()
# KNUTH MORRIS PRAT #
def bigThree(value, idxes):
    """Return the three entries of *idxes* whose scores in *value* are largest.

    Performs a partial selection sort over the first three positions; both
    lists are reordered in place (callers only use the returned prefix).
    Ties keep the earlier element, matching the original strict ``>`` test.

    Fix: each pass originally started with ``max = 0, maxIdx = -1``, so a
    pass over all-zero scores swapped with index -1 (the *last* element).
    Each pass now starts from the current position, which is always valid,
    and no longer shadows the ``max`` builtin.
    """
    for i in range(3):
        # Find the position of the largest remaining score.
        best = i
        for j in range(i + 1, len(idxes)):
            if value[j] > value[best]:
                best = j
        # Swap the winning score/index pair into position i.
        idxes[i], idxes[best] = idxes[best], idxes[i]
        value[i], value[best] = value[best], value[i]
    return idxes[:3]
def borderFunctionKMP(str, m):
    """Compute the KMP failure ("border") table for the first *m* chars.

    border[i] is the length of the longest proper prefix of str[:i+1] that
    is also a suffix of it; border[0] is always 0.
    """
    border = [0] * m
    matched = 0  # length of the prefix currently matched
    for i in range(1, m):
        # Fall back through successively shorter borders until the next
        # character extends one of them (or none is left).
        while matched > 0 and str[i] != str[matched]:
            matched = border[matched - 1]
        if str[i] == str[matched]:
            matched += 1
        border[i] = matched
    return border
def knuthMorrisPrat(string1, txt):
    """Score how well query *string1* matches stored question *txt* using KMP.

    Returns a percentage-like score: if the query length is within 90-110%
    of the question and matches exactly, the length ratio is returned;
    otherwise each query token (and its synonyms) is searched individually
    and the matched-character coverage is scored. 0 means no match.
    """
    n = len(txt) # question length (already stripped of its question mark)
    m = len(string1)
    match = False
    wholeScore = m * 100 / n
    if(wholeScore >= 90) and (wholeScore <= 110):
        # Lengths are close enough: try an exact whole-string KMP match.
        border = borderFunctionKMP(string1,m)
        i = 0
        j = 0
        while (i < n):
            if (string1[j] == txt[i]):
                i = i + 1
                j = j + 1
                if (j == m):
                    # Pattern found.
                    match = True
                    j = border[j-1]
            elif (i < n) and (string1[j] != txt[i]):
                # Mismatch: shift using the border table.
                if(j != 0):
                    j = border[j-1]
                else:
                    i = i + 1
        if(match):
            return wholeScore
    countMatch = 0
    if(not match):
        # Token-by-token fallback: search each word (or any synonym of it).
        tokenizedString = string1.split()
        totalLength = len(txt)
        n = len(txt) - 1 # minus the trailing space appended by initDB
        for substring in tokenizedString:
            # Look up the token's synonym group.
            listOfPattern = findSynonym(substring)
            for pattern in listOfPattern:
                m = len(pattern)
                border = borderFunctionKMP(pattern,m)
                patternMatch = False
                i = 0
                j = 0
                while (i < n):
                    if (pattern[j] == txt[i]):
                        i = i + 1
                        j = j + 1
                        if (j == m):
                            # Pattern found: credit its length plus one space.
                            countMatch = countMatch + m + 1
                            j = border[j-1]
                            patternMatch = True
                            break #BreakWhile
                    elif (i < n) and (pattern[j] != txt[i]):
                        # Mismatch: shift using the border table.
                        if(j != 0):
                            j = border[j-1]
                        else:
                            i = i + 1
                if(patternMatch):
                    break #BreakFor
        # Coverage ratio (or its inverse when the query is much longer).
        if(wholeScore <= 110):
            return (countMatch * 100.0 / totalLength)
        elif(countMatch > 0):
            return (totalLength * 100.0 / countMatch)
        else:
            return 0
    # Any remaining case: no match.
    return 0
def resultKMP(string):
    """Match *string* against every stored question using the KMP scorer.

    Returns ``(found, idxes)``: ``found`` is True when at least one question
    scored >= 90; ``idxes`` holds the matching indices (trimmed to the best
    three), or a single "did you mean" suggestion when nothing matched.
    Note: ``max`` here shadows the builtin of the same name.
    """
    max = 0
    maxIdx = -1
    countOfResult = 0
    idxes = []
    maxValues =[]
    for i in range(numOfQuestion):
        # Score the query against question i.
        x = knuthMorrisPrat(string,questionDB[i])
        if(x >= 90):
            # Confident match: remember its index and score.
            countOfResult = countOfResult + 1
            maxValues.append(x)
            idxes.append(i)
        if(x > max):
            max = x
            maxIdx = i
    if(countOfResult == 0):
        # No confident match: suggest the best-scoring question, if any.
        if(maxIdx != -1):
            idxes.append(maxIdx)
    elif(countOfResult > 3):
        # Too many matches: keep only the top three by score.
        idxes = bigThree(maxValues,idxes)
    return ((countOfResult > 0), idxes)
# BOYER MOORE #
def badCharBM(string):
    """Build the Boyer-Moore bad-character table for pattern *string*.

    Returns a 256-entry list mapping a character code to the index of that
    character's last occurrence in the pattern, or -1 when it never occurs.
    """
    table = [-1] * 256  # one slot per possible byte value
    for position, char in enumerate(string):
        # Later occurrences overwrite earlier ones, keeping the last index.
        table[ord(char)] = position
    return table
def boyerMoore(string1,txt):
    """Score how well query *string1* matches stored question *txt* using
    Boyer-Moore (bad-character rule only).

    Mirrors knuthMorrisPrat(): exact whole-string match when the lengths are
    within 90-110% of each other, otherwise token-by-token matching with
    synonym expansion, scored by matched-character coverage. 0 = no match.
    """
    n = len(txt) # question length (already stripped of its question mark)
    m = len(string1)
    wholeScore = m * 100 / n
    match = False
    if(wholeScore >= 90) and (wholeScore <= 110):
        # Lengths close enough: try matching the whole query exactly.
        badChar = badCharBM(string1)
        shift = 0
        while(shift <= n-m):
            j = m - 1
            while(j >= 0) and (string1[j] == txt[shift+j]):
                j = j - 1
            if(j < 0):
                # Pattern found.
                match = True
                break #BreakWhile
            else:
                # Bad-character shift.
                shift = shift + max(1, j-badChar[ord(txt[shift+j])])
        if(match):
            return wholeScore
    if(not match):
        # Token-by-token fallback.
        tokenizedString = string1.split()
        countMatch = 0
        totalLength = len(txt)
        n = len(txt) - 1
        for substring in tokenizedString:
            # Look up the token's synonym group.
            listOfPattern = findSynonym(substring)
            patternMatch = False
            for pattern in listOfPattern:
                m = len(pattern)
                badChar = badCharBM(pattern)
                shift = 0
                while(shift <= n-m):
                    j = m - 1
                    while(j >= 0) and (pattern[j] == txt[shift+j]):
                        j = j - 1
                    if(j < 0):
                        # Pattern found: credit its length plus one space.
                        countMatch = countMatch + m + 1
                        patternMatch = True
                        break #BreakWhile
                    else:
                        shift = shift + max(1, j-badChar[ord(txt[shift+j])])
                if(patternMatch):
                    break #BreakFor
        # Coverage ratio (or its inverse when the query is much longer).
        if(wholeScore <= 110):
            return (countMatch * 100.0 / totalLength)
        elif(countMatch > 0):
            return (totalLength * 100.0 / countMatch)
        else:
            return 0
    # Any remaining case: no match.
    return 0
def resultBM(str):
    """Match *str* against every stored question using the Boyer-Moore scorer.

    Returns ``(found, idxes)`` exactly like resultKMP(): matching indices
    (trimmed to the best three), or a "did you mean" suggestion.

    Fixes, aligning the body with resultKMP:
    * ``maxValues`` was never appended to, so ``bigThree(maxValues, idxes)``
      raised IndexError whenever more than three questions matched;
    * the best-score tracking sat *inside* the ``x >= 90`` branch, so the
      suggestion fallback never fired when nothing matched confidently.
    """
    best = 0
    bestIdx = -1
    countOfResult = 0
    idxes = []
    maxValues = []
    for i in range(numOfQuestion):
        # Score the query against question i.
        x = boyerMoore(str, questionDB[i])
        if(x >= 90):
            # Confident match: remember its index and score.
            countOfResult = countOfResult + 1
            maxValues.append(x)
            idxes.append(i)
        if(x > best):
            best = x
            bestIdx = i
    if(countOfResult == 0):
        # No confident match: suggest the best-scoring question, if any.
        if(bestIdx != -1):
            idxes.append(bestIdx)
    elif(countOfResult > 3):
        # Too many matches: keep only the top three by score.
        idxes = bigThree(maxValues, idxes)
    return ((countOfResult > 0), idxes)
# REGULAR EXPRESSION #
def buildString(tokenizedString, line, j):
    """Build a loose regex from tokens, substituting *line* for token *j*.

    Produces ``"(.*)tok0(.*)tok1(.*)..."`` with ``tokenizedString[j]``
    replaced by *line* (a synonym candidate).

    Fix: the original assembled the string but fell off the end without a
    ``return`` statement, so every caller received None.
    """
    stringBuilt = "(.*)"
    for i in range(len(tokenizedString)):
        if(i == j):
            stringBuilt = stringBuilt + line + "(.*)"
        else:
            stringBuilt = stringBuilt + tokenizedString[i] + "(.*)"
    return stringBuilt
def resultRegex(string):
    """Match *string* against every stored question using regular expressions.

    Returns ``(found, idxes)`` in the same shape as resultKMP()/resultBM().

    NOTE(review): ``pattern`` is built from synonym substitutions but the
    search below runs on the raw ``string`` instead — it looks like
    ``re.search(pattern, ...)`` was intended; confirm before changing.
    Also ``maxIdx`` is never updated, so the "did you mean" fallback can
    never fire here.
    """
    maxIdx = -1
    max = 0
    countOfResult = 0
    idxes = []
    maxValues = []
    for i in range(numOfQuestion):
        #Change this later
        tokenizedString = string.split()
        j = 0
        for substring in tokenizedString:
            # Try every synonym of the current token in the built pattern.
            substringSynonyms = findSynonym(substring)
            for line in substringSynonyms:
                pattern = buildString(tokenizedString, line, j)
                x = re.search(string,questionDB[i],re.M|re.I)
                if(x):
                    # Length-ratio score; only close lengths count as a match.
                    score = len(string) * 100.0 / len(questionDB[i])
                    if(score <= 110):
                        countOfResult += 1
                        maxValues.append(score)
                        idxes.append(i)
                        if(score > max):
                            max = score
                    break #BreakFor
            if(x):
                break #BreakFor
            else:
                j += 1
    if(countOfResult == 0):
        if(maxIdx != -1):
            idxes.append(maxIdx)
    elif(countOfResult > 3):
        # Too many matches: keep only the top three by score.
        idxes = bigThree(maxValues,idxes)
    return ((countOfResult > 0), idxes)
# OTHER FUNCTION
def otherFunc(string):
    """Stub for an additional pattern-matching strategy.

    Currently unimplemented: always reports a zero score, i.e. ``(0, 0)``.
    """
    best_score = 0
    best_index = -1  # placeholder, not yet surfaced to callers
    return (best_score, 0)
def initDB():
    """Populate questionDB/answerDB from disk files and the FAQs constant.

    Reads parallel lines from pertanyaan.txt (questions) and jawaban.txt
    (answers); each question is stripped of its '?', stop-word filtered,
    and stored with a trailing space. A hard-coded first entry and the
    utils.FAQs pairs are added as well. Updates the global numOfQuestion.
    Note: ``tuple`` below shadows the builtin, and ``ans`` is reused for
    both the file handle and the FAQ answer.
    """
    global numOfQuestion
    numOfQuestion = 1
    questionDB.append("Siapa nama Anda")
    answerDB.append("Aku Fluffball")
    quest = open("pertanyaan.txt","r")
    for line in quest:
        numOfQuestion = numOfQuestion + 1
        questString = line
        questString = questString.replace("?","")
        questString = removeStopWords(questString.strip()) + " "
        questionDB.append(questString)
    ans = open("jawaban.txt","r")
    for line in ans:
        answerDB.append(line.strip())
    #print(questionDB)
    #print(answerDB)
    quest.close()
    ans.close()
    # Append the built-in FAQ pairs with the same normalisation.
    for tuple in FAQs:
        numOfQuestion = numOfQuestion + 1
        que, ans = tuple
        questionDB.append(removeStopWords(que) + " ")
        answerDB.append(ans)
def removeStopWords(string):
    """Return *string* with every token found in ``stopwords`` removed.

    Tokens are split on whitespace and the survivors re-joined with single
    spaces; an all-stopword (or empty) input yields the empty string.
    """
    kept = [word for word in string.split() if word not in stopwords]
    return " ".join(kept)
def findSynonym(string):
    """Return the synonym group from ``listSynonym`` containing *string*.

    When the word belongs to no group, a one-element list holding the word
    itself is returned, so callers can always iterate the result.
    """
    for group in listSynonym:
        if string in group:
            # Return the whole group so every synonym is tried by callers.
            return group
    return [string]
def talk(string):
    """Print a chatbot line prefixed with the bot's name."""
    print(f"Fluffball : {string}")
# Main program #
def useKMP(string):
    # Run the KMP matcher over the question DB and present the result.
    found, listHasil = resultKMP(string)
    tampikanHasil(found,listHasil)
def useBM(string):
    # Run the Boyer-Moore matcher over the question DB and present the result.
    found, listHasil = resultBM(string)
    tampikanHasil(found,listHasil)
def useRegex(string):
    # Run the regex matcher over the question DB and present the result.
    found, listHasil = resultRegex(string)
    tampikanHasil(found,listHasil)
def tampikanHasil(found, listHasil):
    """Present a match result to the user (name is Indonesian for "show result").

    One confident match prints its answer; several print a pick-list of the
    matched questions; no match prints either a "did you mean" suggestion
    (single candidate index) or an "I don't understand" message.
    """
    if(found):
        if(len(listHasil) == 1):
            # Exactly one match: answer directly.
            print(answerDB[listHasil[0]])
        else: #len(listHasil) > 1
            # Several matches: list the candidate questions, comma-separated.
            first = True
            otp = ""
            for i in listHasil:
                if(first):
                    otp = questionDB[i].strip()+"?"
                    first = False
                else:
                    otp = otp +", "+questionDB[i].strip()+"?"
            print("Pilih pertanyaan ini : "+otp)
    else:
        otp = "Mungkin maksud Anda : "
        if(len(listHasil) == 0):
            # No suggestion available at all.
            print("Saya tidak mengerti maksud Anda")
            #print(otp + questionDB[0].strip()+"?)
        else:
            print(otp + questionDB[listHasil[0]]+"?")
def DebugAll():
    """Interactive debug loop: pick a matcher, then chat until "end".

    Prompts (in Indonesian) for a method 1-3, then repeatedly reads a
    question, strips its '?' and stop-words, and dispatches to the chosen
    matcher. Any other method number re-prompts.
    """
    initDB()
    talk("Halo, ada yang bisa dibantu?")
    talk("Pilih metode pencarian")
    print("1. Knuth-Morris-Prat")
    print("2. Boyer-Moore")
    print("3. Regular expression")
    choice = int(input("Anda : "))
    while(True):
        if(choice >= 1) and (choice <= 3):
            string = str(input("Anda : "))
            # "end" terminates the chat loop.
            if(string == "end"):
                break
            # Same normalisation applied to the stored questions.
            string = string.replace("?","")
            string = removeStopWords(string)
            if(choice == 1):
                useKMP(string)
            elif(choice == 2):
                useBM(string)
            elif(choice == 3):
                useRegex(string)
        else:
            talk("Invalid input!! Masukkan kembali pilihan Anda")
            choice = int(input("Anda : "))
def Execute():
    """Batch entry point: replay every question in chatLog.txt through the
    matcher selected by ``sys.argv[1]`` ('1' KMP, '2' Boyer-Moore,
    '3' regex).

    Fix: the log file is now closed deterministically via a context manager
    — the original handle was opened and never closed.
    """
    initDB()
    with open("chatLog.txt", "r") as chatLog:
        for line in chatLog:
            # Normalise exactly like the stored questions were normalised.
            getQuestion = line.strip().replace("?", "")
            getQuestion = removeStopWords(getQuestion)
            if(sys.argv[1] == '1'):
                useKMP(getQuestion)
            elif(sys.argv[1] == '2'):
                useBM(getQuestion)
            elif(sys.argv[1] == '3'):
                useRegex(getQuestion)
# Alternative interactive entry points (disabled):
#DebugAll()
#DebugKMP()
#DebugBM()
#DebugRegex
# Default: batch-process chatLog.txt, matcher chosen by sys.argv[1].
Execute()
| [
"45161697+hendpraz@users.noreply.github.com"
] | 45161697+hendpraz@users.noreply.github.com |
bfd774f91b26d227ba70c15082fed0194b86585b | c02e5e0730a04b0a16c68d8aad928daedd770948 | /App/forms.py | 6803dbdd618672636a38b52907a07a83f6c8f902 | [
"MIT"
] | permissive | ashtonfei/flask-mini-app | 11c005f05496d655078a259cc70347c9664e0738 | 5a825665caef257d1f0fe3a670fcd1cea650688e | refs/heads/main | 2023-03-30T18:12:44.430341 | 2021-04-09T15:46:06 | 2021-04-09T15:46:06 | 355,441,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, EqualTo, Email
class LoginForm(FlaskForm):
    """Flask-WTF login form: email + password (min. 6 characters)."""
    email = StringField(label='Email', validators=[DataRequired(), Email()])
    password = PasswordField(label='Password', validators=[
                             DataRequired(), Length(min=6)])
    submit = SubmitField(label='Log In')
class RegisterForm(FlaskForm):
    """Flask-WTF registration form with confirmation-matched password."""
    username = StringField(label='User name', validators=[
                           Length(min=3, max=12), DataRequired()])
    email = StringField(label='Email', validators=[DataRequired(), Email()])
    password = PasswordField(label='Password', validators=[
                             Length(min=6), DataRequired()])
    # Must equal the 'password' field above.
    password_confirm = PasswordField(
        label='Confirm password', validators=[EqualTo('password'), DataRequired()])
    first_name = StringField(label='First name', validators=[DataRequired()])
    middle_name = StringField(label='Middle name', validators=[])
    last_name = StringField(label='Last name', validators=[DataRequired()])
    phone = StringField(label='Phone', validators=[DataRequired()])
    gender = SelectField(label='Gender', choices=['Male', 'Female'], validators=[
                         DataRequired()], default="Male")
    submit = SubmitField(label='Register')
| [
"yunjia.fei@gmail.com"
] | yunjia.fei@gmail.com |
c27426914cd5012c8f9639773cee57f0c16aeee3 | 57972581decd1707834a58eefa3b77e9ed24bf28 | /service_2/__init__.py | 4e910ebf3bd4d91cd523d337209cb03068cfa80c | [] | no_license | abhyasgiri/milestone-serverless-project | 8e671288adf3abbf71217e42c3984152da967571 | 3e1a67859fd0289ae56772b4b5079fa43452a1a4 | refs/heads/main | 2023-02-23T01:30:23.132175 | 2021-02-02T16:44:01 | 2021-02-02T16:44:01 | 335,349,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | import logging
import random
from string import ascii_lowercase
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function HTTP trigger: respond with five random lowercase letters."""
    logging.info('Python HTTP trigger function processed a request.')
    # Five independent draws from the lowercase alphabet.
    letters = "".join(random.choice(ascii_lowercase) for _ in range(5))
    return func.HttpResponse(
        letters,
        status_code=200
    )
"abhyasgiri@outlook.com"
] | abhyasgiri@outlook.com |
0ef417ef2ea2ab51e1240c4fc86e2f26be2e0302 | 509d717f18caad77e00c3261dcf1934f7e5bd95d | /venv/css_selectors/sports_bet_page_locators.py | 65e85e02577a73a8730a604977beb681bc7cbdcc | [] | no_license | Swingyboy/pronet_design_testing | 8aee2f42e2452ca178fbe34e7a51ce7377156e08 | ad3dc5a58983ed6d6c9cef91a40ea8160f699dd0 | refs/heads/master | 2023-05-06T05:34:47.438023 | 2020-09-15T09:17:36 | 2020-09-15T09:17:36 | 281,055,876 | 1 | 1 | null | 2021-06-02T02:56:51 | 2020-07-20T08:12:21 | Python | UTF-8 | Python | false | false | 403 | py | from selenium.webdriver.common.by import By
class SportsBetPageLocators():
UPCOMING_EVENTS_BAR =(By.CSS_SELECTOR, 'upcoming-events > div > div.modul-header')
LIVE_BET_BAR = (By.CSS_SELECTOR, 'live-at-now > div > div.modul-header')
ESPORTS_BAR = (By.CSS_SELECTOR, 'app-esports > div > div.modul-header')
TODAY_EVENT_BAR = (By.CSS_SELECTOR, 'todays-sport-types > div > div.modul-header') | [
"kedonosec@gmail.com"
] | kedonosec@gmail.com |
b0af71064e926490ac415e9930d72e7cccec1d8c | 7464f15c33c74454f2a98dceb7f603919abba4d1 | /happy.py | 01383a2a50c7506bb341600a3deaf9076a692953 | [] | no_license | willingc/my-bit | 374bece797c59956e500504cd62940a2c1718013 | 535768dcb09297f1028e0e111fd062b91e8032c6 | refs/heads/master | 2016-08-08T21:26:22.119643 | 2015-11-30T03:23:59 | 2015-11-30T03:23:59 | 47,053,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """
happy.py
by Carol Willing
November 28, 2015
Public Domain
Use this to display a 'Happy Face' image on micro:bit's 5x5 pixel grid of LEDs.
Remember... Writing a program is similar to planning a birthday party.
Program Birthday party
------- --------------
'Prepare' Prepare the room with balloons; order food; pick up a cake.
'Do' Do things during the party -- sing, dance, play videogames.
'Clean' Clean the table. Tidy up after the party. Take out the rubbish.
"""
from microbit import *
# Prepare. Put the preinstalled images into user friendly variables
# Prepare. Put the preinstalled images into user friendly variables
my_happy_face = Image.HAPPY
my_sad_face = Image.SAD
# Do things! ----> Show the images on the display.
display.show(my_happy_face)
sleep(8000)  # hold for 8 seconds (micro:bit sleep takes milliseconds)
display.show(my_sad_face)
sleep(8000)
display.show(my_happy_face)
sleep(4000)
# Clean up stuff. Display 'BYE' and clear display. (Clean your room too.)
display.scroll("BYE")
display.clear()
| [
"carolcode@willingconsulting.com"
] | carolcode@willingconsulting.com |
79722b7ad6e4e2c4ed519da6d093a3f52c9824bf | f56d915f46d779b9ed07a8b6bb048b688865cd7b | /passette.py | 97f4c671482303943a387b2184856c6fc5a118fc | [] | no_license | deskofcraig/spotifyplayer | 42a252a92ab5d9cc3a1ed7bb68b886579bd1178a | 759deb180e345712c09a07e1d5e0fa55dd747e3a | refs/heads/master | 2020-04-11T05:42:33.882169 | 2019-01-19T02:47:32 | 2019-01-19T02:47:32 | 161,557,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | #import the GPIO and time package
import RPi.GPIO as GPIO
import time
import os
#from mopidy import core
# Raspberry Pi GPIO setup: four transport buttons + a rotary volume encoder,
# all driving the mpc (MPD client) Spotify player via shell commands.
GPIO.setmode(GPIO.BOARD)
#yellow/back button
GPIO.setup(37, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#red/pause button
GPIO.setup(36, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#green/play button
GPIO.setup(33, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#white/next button
GPIO.setup(32, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#encoder | A - red | C - black | B - yellow
#pinA
GPIO.setup(29, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#pinB
GPIO.setup(31, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Encoder state: counter is the volume (0-100); count throttles updates to
# every fourth quadrature transition (one detent).
count = 0
counter = 10
pinALast = GPIO.input(29)
pinBLast = GPIO.input(31)
pinCLast = pinALast ^ pinBLast
encoderMin = 0
encoderMax = 100
inc = 1
last_state = pinALast * 4 + pinBLast * 2 + pinCLast * 1
#system on start: set the volume and queue four tracks.
os.system("mpc volume 10")
os.system("mpc add spotify:track:6jXPZid0KLorvgIDP6TiSo")
os.system("mpc add spotify:track:5GjPQ0eI7AgmOnADn1EO6Q")
os.system("mpc add spotify:track:6r20M5DWYdIoCDmDViBxuz")
os.system("mpc add spotify:track:17S4XrLvF5jlGvGCJHgF51")
# Main poll loop: buttons are debounced with a 0.3 s sleep after each press.
while True:
	#back/yellow button
	if GPIO.input(37) == GPIO.HIGH:
		os.system("mpc prev")
		print("'back' was pushed!")
		time.sleep(.3)
	#pause/red button
	if GPIO.input(36) == GPIO.HIGH:
		os.system("mpc pause")
		print("'pause' was pushed!")
		time.sleep(.3)
	#play/green button
	if GPIO.input(33) == GPIO.HIGH:
		os.system("mpc toggle")
		print("'play' was pushed!")
		time.sleep(.3)
	#next/white button
	if GPIO.input(32) == GPIO.HIGH:
		os.system("mpc next")
		print("'next' was pushed!")
		time.sleep(.3)
	#encoder
	pinA = GPIO.input(29)
	pinB = GPIO.input(31)
	pinC = pinA ^ pinB
	new_state = pinA * 4 + pinB * 2 + pinC * 1
	delta = (new_state - last_state) % 4
	# delta | pinA | pinB | pinC | new_state
	# ======================================
	#   0   |  0   |  0   |  0   |    0
	#   1   |  1   |  0   |  1   |    5
	#   2   |  1   |  1   |  0   |    6
	#   3   |  0   |  1   |  1   |    3
	# https://bobrathbone.com/raspberrypi/documents/Raspberry%20Rotary%20Encoders.pdf
	if (new_state != last_state):
		count += 1
		if (count % 4 == 1):
			# delta 3 = one step clockwise (volume up); otherwise down.
			if (delta == 3):
				counter += inc
				if (counter > encoderMax):
					counter = 100
			else:
				counter -= inc
				if (counter < encoderMin):
					counter = 0
			volume = "mpc volume " + str(int(counter))
			os.system(volume)
	last_state = new_state
| [
"noreply@github.com"
] | deskofcraig.noreply@github.com |
e9ad5aee994bfbdd74e6f30e6aa132036122e60b | 437b5b668d6d2c6f089bbeabb2676db46d8cdd07 | /temp_ach_multiprocess.py | 0037b9c2e1f8db810f86f2e2c4c54c41b7643c81 | [
"MIT"
] | permissive | marcelosalles/idf-creator | 4e53d185ab42b7f14dbd2d83e9ffe7e015ef07c3 | 399a68dbee9d275e79df75c55acdec5f246ff07f | refs/heads/master | 2020-03-18T13:03:40.804518 | 2018-12-21T19:33:49 | 2018-12-21T19:33:49 | 134,758,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | # Return EHF from multiple simulation results of Operative Temperature
import argparse
import csv
import datetime
import glob
from multiprocessing import Pool
import os
import pandas as pd
# Result folders are named '<FOLDER_STDRD>NN'; LEN_FOLDER_NAME is the suffix
# length sliced off each path to recover the folder name.
FOLDER_STDRD = 'cluster'
LEN_FOLDER_NAME = len(FOLDER_STDRD) + 2
BASE_DIR = '/media/marcelo/OS/LabEEE_1-2/idf-creator/single_12_20/'
# BASE_DIR = 'D:/LabEEE_1-2/idf-creator/sobol_single'
# Monthly mean temperatures used as the adaptive-comfort reference.
MONTH_MEANS = pd.read_csv('month_means_8760.csv')
MAX_THREADS = 18
# SIMULATIONS = 108000
# N_CLUSTERS = 18
# batch = SIMULATIONS/N_CLUSTERS
def process_folder(folder):
    """Summarise EnergyPlus results for one cluster folder.

    For every finished simulation in *folder* (detected via its ``*.err``
    file), reads the matching ``*out.csv`` output, averages operative
    temperature and infiltration ACH over occupied hours, and computes the
    exceedance-hot fraction (EHF): the share of occupied hours whose
    operative temperature exceeds the monthly mean + 3.5 degC (monthly
    means come from the module-level MONTH_MEANS table).  Writes one row
    per simulation to ``means_<folder_name>.csv`` inside *folder*.
    """
    line = 0  # progress counter for console output only
    # Trailing "<FOLDER_STDRD>_<n>" part of the path, e.g. "cluster_3".
    folder_name = folder[len(folder)-LEN_FOLDER_NAME:]
    # NOTE(review): chdir() changes the CWD of the whole worker process;
    # acceptable here because each Pool worker handles one folder per call.
    os.chdir(folder)
    epjson_files = glob.glob('*.err')  # one .err file per completed run
    print(len(epjson_files))
    # Column-oriented accumulator, turned into a DataFrame at the end.
    df_temp = {
        'folder': [],
        'file': [],
        'temp': [],
        'ach': [],
        'ehf': []
    }
    for file in epjson_files:
        print(line, ' ', file, end='\r')
        line += 1
        # 'Xout.err' -> 'Xout.csv': strips the 7-char 'out.err' suffix,
        # then appends 'out.csv'.
        csv_file = file[:-7]+'out.csv'
        df = pd.read_csv(csv_file)
        df_temp['file'].append(file[:-7])
        df_temp['folder'].append(folder_name)
        # Occupied-hours means only (occupancy schedule value > 0).
        df_temp['temp'].append((df['OFFICE:Zone Operative Temperature [C](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
        df_temp['ach'].append((df['OFFICE:AFN Zone Infiltration Air Change Rate [ach](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
        # Hot-hour flag: 1 when operative temperature exceeds the monthly
        # adaptive upper limit (monthly mean + 3.5 degC), else 0.
        df['E_hot'] = -1
        df['sup_lim'] = MONTH_MEANS['mean_temp'] + 3.5
        df.loc[df['OFFICE:Zone Operative Temperature [C](Hourly)'] > df['sup_lim'], 'E_hot'] = 1
        df.loc[df['OFFICE:Zone Operative Temperature [C](Hourly)'] <= df['sup_lim'], 'E_hot'] = 0
        # EHF = mean of the hot-hour flag over occupied hours.
        df_temp['ehf'].append(df['E_hot'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0].mean())
    df_output = pd.DataFrame(df_temp)
    df_output.to_csv('means_{}.csv'.format(folder_name), index=False)
    print('\tDone processing folder \'{}\''.format(folder_name))
if __name__ == '__main__':
    # CLI: optional -t to force the worker count, otherwise one worker per
    # folder capped at MAX_THREADS.
    parser = argparse.ArgumentParser(description='Process output data from Energyplus.')
    parser.add_argument('-t',
                        action='store',
                        type=int,
                        help='runs T threads')
    args = parser.parse_args()
    # Every cluster folder under BASE_DIR (e.g. .../cluster0, cluster1, ...).
    folders = glob.glob(BASE_DIR+FOLDER_STDRD+'*')
    print('Processing {} folders in \'{}\':'.format(len(folders), BASE_DIR))
    for folder in folders:
        print('\t{}'.format(folder))
    start_time = datetime.datetime.now()
    # Pool.map blocks until every folder has been processed.
    # NOTE(review): the pool is never close()d/join()ed; harmless because
    # the process exits right after, but `with Pool(...) as p:` is tidier.
    if args.t:
        p = Pool(args.t)
        p.map(process_folder, folders)
    else:
        num_folders = len(folders)
        p = Pool(min(num_folders, MAX_THREADS))
        p.map(process_folder, folders)
    end_time = datetime.datetime.now()
    total_time = (end_time - start_time)
    print("Total processing time: " + str(total_time))
| [
"marcelosalles@github.com"
] | marcelosalles@github.com |
5527d366f62eeca1618526aeba69c022f62e9b48 | fb887b712b05c5e2a3ab1a02d9349c246fc06922 | /app/migrations/0003_auto_20210316_2151.py | 5cba198802eba0fe273af13aee79f4ead5b43c65 | [] | no_license | kousik-prabu-git/SafeNest | 3347b139488053d42abd4997cf6f9e3a11542e1d | b9368a497e37b2076683eb00e7d8f8644647b903 | refs/heads/master | 2023-04-14T12:15:47.598980 | 2021-05-02T08:28:27 | 2021-05-02T08:28:27 | 347,817,753 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | # Generated by Django 3.1.3 on 2021-03-16 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (2021-03-16).

    Adds a required ``reporter`` FK to ``Activity`` (existing rows are
    backfilled with user pk 1 via ``default=1``) and makes the
    ``volunteer`` FK optional (null/blank allowed).
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0002_auto_20210316_1056'),
    ]

    operations = [
        migrations.AddField(
            model_name='activity',
            name='reporter',
            # NOTE(review): references 'auth.user' directly while
            # 'volunteer' below uses settings.AUTH_USER_MODEL -- fine with
            # the default user model, but inconsistent.  Do not edit
            # applied migrations; fix in a follow-up migration if needed.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Reporter', to='auth.user'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='activity',
            name='volunteer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Volunteer', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"uniqfocuz@gmail.com"
] | uniqfocuz@gmail.com |
fdece734fd20e95f571e250da5d3d0bb56e27b8a | 5b029c81490df1cd2988108ed23e71aca40a9816 | /MusicPredictiveAnalysis_EE660_USCFall2015-master/Code/Machine_Learning_Algos/10k_Tests/ml_classification_simple_pca.py | a9f5f7a9d11effe0c958f66898e4d78676120bad | [
"MIT"
] | permissive | lianghaol/Machine-Learning | e9b9ea009629ee1a0e24a0e55d90a3ed3c92ec05 | e3353252ca54b62ff8f5ada87566bab4f373c260 | refs/heads/master | 2021-12-10T19:19:34.914637 | 2016-09-16T02:48:30 | 2016-09-16T02:48:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | __author__ = 'NishantNath'
# !/usr/bin/env python
'''
Using : Python 2.7+ (backward compatibility exists for Python 3.x if separate environment created)
Required files : hdf5_getters.py
Required packages : numpy, pandas, matplotlib, sklearn
Steps:
1.
# Uses Simple PCA to find the most important features
# Uses Simple PCA Iteratively to find performance based on number of components
'''
import pandas
import matplotlib.pyplot as mpyplot
import pylab
import numpy
from itertools import cycle
def plot_2D(data, target, target_names):
    """Scatter-plot the first two columns of *data*, one colour per class.

    data: 2-D array-like; rows are samples, only columns 0 and 1 are drawn.
    target: 1-D array of integer class ids aligned with the rows of *data*
        (boolean-mask indexing ``data[target == i, 0]`` assumes numpy-style
        arrays -- TODO confirm callers pass arrays, not DataFrames).
    target_names: class labels, in id order, used for the legend.
    """
    colors = cycle('rgbcmykw')  # recycles 8 colours if there are more classes
    target_ids = range(len(target_names))
    mpyplot.figure()
    for i, c, label in zip(target_ids, colors, target_names):
        mpyplot.scatter(data[target == i, 0], data[target == i, 1], c=c, label=label)
    mpyplot.legend()
    # NOTE: figure is not shown here; the caller decides when to show it.
# Genre label encoding used in column 0 ('genre') of the input CSV:
# [0: 'CLASSICAL', 1: 'METAL', 2: 'HIPHOP', 3: 'DANCE', 4: 'JAZZ']
# [5:'FOLK', 6: 'SOUL', 7: 'ROCK', 8: 'POP', 9: 'BLUES']
col_input=['genre', 'year', 'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9', 'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17', 'col18', 'col19', 'col20', 'col21', 'col22', 'col23', 'col24', 'col25', 'col26', 'col27', 'col28', 'col29', 'col30', 'col31', 'col32', 'col33', 'col34', 'col35', 'col36', 'col37', 'col38', 'col39', 'col40', 'col41', 'col42', 'col43', 'col44', 'col45', 'col46', 'col47', 'col48', 'col49', 'col50', 'col51', 'col52', 'col53', 'col54', 'col55', 'col56', 'col57', 'col58', 'col59', 'col60', 'col61', 'col62', 'col63', 'col64', 'col65', 'col66', 'col67', 'col68', 'col69', 'col70', 'col71', 'col72']
df_input = pandas.read_csv('pandas_output_missing_data_fixed.csv', header=None, delimiter = ",", names=col_input)

# range(2, 74) selects the 72 feature columns (positions 2..73).
# NOTE(review): columns were renamed via names=col_input, so indexing with
# integer keys relies on positional behaviour of the pandas version this
# was written for (Python 2 era) -- verify before reuse.
df_input_data = df_input[list(range(2, 74))]
df_input_target = df_input[list(range(0, 1))]  # genre column only

# One random colour value per sample, reused by the scatter plots below.
colors = numpy.random.rand(len(df_input_target))

# Simple PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=6)  # from optimal pca components chart: n_components=6
pca.fit(df_input_data)

# Relative weights on features (Python 2 print statements).
print pca.explained_variance_ratio_
print pca.components_

# Performance: number of components vs cumulative explained variance.
pca2 = PCA().fit(df_input_data)

# Plot cumulative explained variance against component count.
mpyplot.figure(1)
p1 = mpyplot.plot(numpy.cumsum(pca2.explained_variance_ratio_))
mpyplot.xlabel('number of components')
mpyplot.ylabel('cumulative explained variance')
mpyplot.show(p1)

# Project the data onto the 6 retained components.
df_input_data_reduced = pca.transform(df_input_data)

# Scatter of the first two principal components, random per-sample colours.
mpyplot.figure(2)
p2 = mpyplot.scatter(df_input_data_reduced[:, 0], df_input_data_reduced[:, 1], c=colors)
mpyplot.colorbar(p2)
mpyplot.show(p2)

# Plotting in 2D - fix this
# NOTE(review): plot_2D expects numpy-style arrays; df_input_target is a
# DataFrame here, so the mask indexing inside plot_2D likely fails.
mpyplot.figure(3)
plot_2D(df_input_data_reduced, df_input_target, pandas.unique(df_input_target))
"ozbekahmetcan@gmail.com"
] | ozbekahmetcan@gmail.com |
f8f8a93e2b53a4b74d0c41930fd04e417f2189c8 | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/cut_mesh-master/op_slice/slice_datastructure.py | 6c86f20d47db1178d36c9ecde0f011a0e1296f6c | [] | no_license | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 7,750 | py | '''
Created on Oct 8, 2015
@author: Patrick
'''
import time
import bpy
import bmesh
from mathutils import Vector, Matrix, kdtree
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_point_line, intersect_line_plane
from bpy_extras import view3d_utils
from ..bmesh_fns import grow_selection_to_find_face, flood_selection_faces, edge_loops_from_bmedges
from ..cut_algorithms import cross_section_2seeds_ver1, path_between_2_points
from ..geodesic import geodesic_walk, continue_geodesic_walk, gradient_descent
from .. import common_drawing
class Slice(object):
    '''
    A class which manages user placed points on an object to create a
    piecewise path of geodesics, adapted to the objects surface.
    '''

    def __init__(self, context, cut_object):
        """Build a BMesh copy and BVH of *cut_object* and clear pick state."""
        self.cut_ob = cut_object
        # Editable in-memory copy of the object's mesh data.
        self.bme = bmesh.new()
        self.bme.from_mesh(cut_object.data)
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        # Lookup tables refreshed a second time (left over from a disabled
        # triangulation experiment; harmless).
        self.bme.verts.ensure_lookup_table()
        self.bme.edges.ensure_lookup_table()
        self.bme.faces.ensure_lookup_table()
        # Acceleration structure for ray casts against the BMesh.
        self.bvh = BVHTree.FromBMesh(self.bme)
        # Picked start ("seed") and end ("target") faces plus their hit
        # locations in object space; path holds the computed cut vertices.
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        self.path = []

    def reset_vars(self):
        """Clear seed/target picks, cached geodesic data and the path."""
        self.seed = None
        self.seed_loc = None
        self.target = None
        self.target_loc = None
        self.geo_data = [dict(), set(), set(), set()]  # geos, fixed, close, far
        self.path = []

    def grab_initiate(self):
        """Begin dragging the target point; snapshot state for undo.

        Returns True when a target exists (grab started), else False.
        """
        if self.target != None:
            self.grab_undo_loc = self.target_loc
            self.target_undo = self.target
            self.path_undo = self.path
            return True
        else:
            return False

    def grab_mouse_move(self, context, x, y):
        """Move the target to the surface point under the mouse and recut.

        Casts a ray from screen position (x, y) into the object; on a miss
        the grab is cancelled, on a hit the target face/location are
        updated and the seed->target path is recomputed.
        """
        region = context.region
        rv3d = context.region_data
        coord = x, y
        # Build a world-space ray from the mouse position.
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # Blender changed the ray_cast signature/semantics in 2.77.
        # NOTE(review): bversion() is not among this module's visible
        # imports -- presumably an add-on-level helper; verify.
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        # Parallel BVH cast; the distance between the two hits is printed
        # for debugging only.
        loc2, no2, face_ind2, d = self.bvh.ray_cast(imx * ray_origin, view_vector)
        if loc != None and loc2 != None:
            print((loc - loc2).length)
        if face_ind == -1:
            # Mouse left the object: revert to the pre-grab state.
            self.grab_cancel()
            return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx, mx * self.seed_loc, mx * self.target_loc,
                                                                    max_tests=10000, debug=True,
                                                                    prev_face=None, use_limit=True)
        # On error the previous path is kept rather than cleared.
        if not error:
            self.path = vrts

    def grab_cancel(self):
        """Restore the target and path saved by grab_initiate()."""
        self.target_loc = self.grab_undo_loc
        self.target = self.target_undo
        self.path = self.path_undo
        return

    def grab_confirm(self):
        """Accept the drag and discard the undo snapshot."""
        self.grab_undo_loc = None
        self.target_undo = None
        self.path_undo = []
        return

    def click_add_seed(self, context, x, y):
        '''
        x,y = event.mouse_region_x, event.mouse_region_y

        Picks the path's start point: casts a ray from the screen position
        and stores the hit face/location as the seed, resetting cached
        geodesic data.  Does nothing (sets selected = -1) on a miss.
        '''
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        # Version-dependent ray_cast call, as in grab_mouse_move().
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1:
            self.selected = -1
            return
        self.seed = self.bme.faces[face_ind]
        self.seed_loc = loc
        # Invalidate cached geodesic data: geos, fixed, close, far.
        self.geo_data = [dict(), set(), set(), set()]

    def click_add_target(self, context, x, y):
        """Pick the path's end point under (x, y) and compute the cut path.

        On a hit, stores the target face/location and computes the
        seed->target path; on a path error the path is cleared.
        """
        region = context.region
        rv3d = context.region_data
        coord = x, y
        view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
        ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
        ray_target = ray_origin + (view_vector * 1000)
        mx = self.cut_ob.matrix_world
        imx = mx.inverted()
        if bversion() < '002.077.000':
            loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target)
        else:
            res, loc, no, face_ind = self.cut_ob.ray_cast(imx * ray_origin, imx * ray_target - imx * ray_origin)
        if face_ind == -1: return
        self.target = self.bme.faces[face_ind]
        self.target_loc = loc
        vrts, eds, ed_cross, f_cross, error = path_between_2_points(self.bme, self.bvh, mx, mx * self.seed_loc, mx * self.target_loc,
                                                                    max_tests=10000, debug=True,
                                                                    prev_face=None, use_limit=True)
        if not error:
            self.path = vrts
        else:
            self.path = []
        return

    def draw(self, context):
        """Draw the current path (blue), seed (red) and target (green)."""
        if len(self.path):
            mx = self.cut_ob.matrix_world
            # Path vertices are object space; transform to world for drawing.
            pts = [mx * v for v in self.path]
            common_drawing.draw_polyline_from_3dpoints(context, pts, (.2, .1, .8, 1), 3, 'GL_LINE')
        if self.seed_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.seed_loc], 8, color=(1, 0, 0, 1))
        if self.target_loc != None:
            mx = self.cut_ob.matrix_world
            common_drawing.draw_3d_points(context, [mx * self.target_loc], 8, color=(0, 1, 0, 1))
class PolyCutPoint(object):
    """A user-placed cut point: a location plus lazily-filled surface data."""

    def __init__(self, co):
        """Remember the location *co*; surface attachment data starts unset."""
        self.co = co
        # Filled in later by the cut logic: surface normal, owning face,
        # and the set of faces around the point.
        self.no = None
        self.face = None
        self.face_region = set()

    def find_closest_non_manifold(self):
        """Locate the nearest non-manifold edge point (not implemented: None)."""
        return None
class NonManifoldEndpoint(object):
    """Endpoint lying on a non-manifold (border) edge of the cut mesh.

    A valid endpoint edge has exactly one linked face; edges with two
    linked faces are interior and edges with zero are degenerate.
    """

    def __init__(self, co, ed):
        """Store location *co* and border edge *ed*.

        Raises ValueError if *ed* does not have exactly one linked face.

        Bug fix: the original ``return None`` here did NOT abort object
        creation -- it silently produced an instance with no attributes,
        which failed later with AttributeError far from the cause.  Fail
        fast with an explicit error instead.
        """
        if len(ed.link_faces) != 1:
            raise ValueError(
                'NonManifoldEndpoint requires an edge with exactly one '
                'linked face, got %d' % len(ed.link_faces))
        self.co = co
        self.ed = ed
        self.face = ed.link_faces[0]
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
be23dca58eab757909e1b01ac74a7f2f65028785 | ee9ddec6307ab76a567b4001cee47278d503b3da | /01. Naive Bayes/01. Spam filtering/classifiers/NaiveBayesClassifier.py | ba39bc3088615798e97486a6ab994a7f35f41607 | [] | no_license | Phil9l/probabilistic-graphical-models | ebf6f6366169f6e4cec72a0199a330a1e350818d | 9471b79ad7d8f0a511ae94a3719132592c5f79a7 | refs/heads/master | 2021-01-18T17:23:56.035485 | 2017-03-31T08:26:30 | 2017-03-31T08:26:30 | 86,793,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from collections import defaultdict
__all__ = ['NaiveBayesClassifier']
class NaiveBayesClassifier:
    """Abstract base for Naive Bayes classifiers.

    Subclasses store per-class training statistics in ``_class_data``
    (class label -> dict of feature data) and must implement both
    ``train`` and ``predict``.
    """

    def __init__(self):
        # Class label -> {feature: statistic}; unseen labels start empty.
        self._class_data = defaultdict(dict)

    def train(self, data, cls):
        """Record *data* as a training example for class *cls* (abstract)."""
        raise NotImplementedError

    def predict(self, data):
        """Return the predicted class for *data* (abstract)."""
        raise NotImplementedError
| [
"phil9lne@gmail.com"
] | phil9lne@gmail.com |
c70b445d6d1bb1da816fcacacadb68decd13d563 | b424a13f032d5a607e6df4dd78bc47ad1d06a147 | /astroquery/simbad/tests/test_simbad.py | fe66d82dc76fea148ff9163e36a89ec61940870a | [] | no_license | EnjoyLifeFund/macSierra-py36-pkgs | 1e7eeb9b55415da6eb12465d67730d76e9cc619a | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | refs/heads/master | 2021-01-20T10:23:50.044019 | 2017-09-05T02:53:26 | 2017-09-05T02:53:26 | 90,333,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,899 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
from astropy.extern import six
import pytest
import astropy.units as u
from astropy.table import Table
import numpy as np
from ... import simbad
from ...utils.testing_tools import MockResponse
from ...utils import commons
from ...exceptions import TableParseError
from .test_simbad_remote import multicoords
# Canned sky positions, one per supported coordinate frame, reused by the
# parametrized tests below.
GALACTIC_COORDS = commons.GalacticCoordGenerator(l=-67.02084, b=-29.75447,
                                                 unit=(u.deg, u.deg))
ICRS_COORDS = commons.ICRSCoordGenerator("05h35m17.3s -05h23m28s")
FK4_COORDS = commons.FK4CoordGenerator(ra=84.90759, dec=-80.89403,
                                       unit=(u.deg, u.deg))
FK5_COORDS = commons.FK5CoordGenerator(ra=83.82207, dec=-80.86667,
                                       unit=(u.deg, u.deg))

# Maps the SIMBAD script verb (captured by MockResponseSimbad.query_regex)
# to the canned response file under tests/data/.
DATA_FILES = {
    'id': 'query_id.data',
    'coo': 'query_coo.data',
    'cat': 'query_cat.data',
    'bibobj': 'query_bibobj.data',
    'bibcode': 'query_bibcode.data',
    'objectids': 'query_objectids.data',
    'error': 'query_error.data',
    'sample': 'query_sample.data',
    'region': 'query_sample_region.data',
}
class MockResponseSimbad(MockResponse):
    """Canned HTTP response whose body is chosen by the SIMBAD script verb."""

    # Extracts the query verb (e.g. 'id', 'coo', 'bibcode') from a script.
    query_regex = re.compile(r'query\s+([a-z]+)\s+')

    def __init__(self, script, cache=True, **kwargs):
        # preserve, e.g., headers
        super(MockResponseSimbad, self).__init__(**kwargs)
        self.content = self.get_content(script)

    def get_content(self, script):
        """Return canned bytes for *script*'s verb via DATA_FILES.

        Implicitly returns None when the script contains no query verb.
        """
        match = self.query_regex.search(script)
        if match:
            filename = DATA_FILES[match.group(1)]
            content = open(data_path(filename), "rb").read()
            return content
def data_path(filename):
    """Return the path of *filename* inside this test package's data dir."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_post(request):
    """Monkeypatch Simbad's HTTP layer to serve canned responses offline."""
    try:
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:  # pytest < 3
        mp = request.getfuncargvalue("monkeypatch")
    # Every SimbadClass._request call now goes through post_mockreturn.
    mp.setattr(simbad.SimbadClass, '_request', post_mockreturn)
    return mp
def post_mockreturn(self, method, url, data, timeout, **kwargs):
    """Stand-in for SimbadClass._request.

    Builds a canned response from the submitted SIMBAD script and records
    the payload on ``self._last_query`` so tests can inspect it.
    """
    response = MockResponseSimbad(data['script'], **kwargs)

    # Minimal attribute holder mimicking the real "last query" record.
    class last_query(object):
        pass
    self._last_query = last_query()
    self._last_query.data = data
    return response
# _parse_radius must normalise strings and Quantity angles to SIMBAD's
# compact "Nd"/"Nm"/"Ns" notation.
@pytest.mark.parametrize(('radius', 'expected_radius'),
                         [('5d0m0s', '5.0d'),
                          ('5d', '5.0d'),
                          ('5.0d', '5.0d'),
                          (5 * u.deg, '5.0d'),
                          (5.0 * u.deg, '5.0d'),
                          (1.2 * u.deg, '1.2d'),
                          (0.5 * u.deg, '30.0m'),
                          ('0d1m12s', '1.2m'),
                          (0.003 * u.deg, '10.8s'),
                          ('0d0m15s', '15.0s')
                          ])
def test_parse_radius(radius, expected_radius):
    actual = simbad.core._parse_radius(radius)
    assert actual == expected_radius


# _to_simbad_format must render ra/dec as colon-separated sexagesimal.
@pytest.mark.parametrize(('ra', 'dec', 'expected_ra', 'expected_dec'),
                         [(ICRS_COORDS.ra, ICRS_COORDS.dec, u'5:35:17.3',
                           u'-80:52:00')
                          ])
def test_to_simbad_format(ra, dec, expected_ra, expected_dec):
    actual_ra, actual_dec = simbad.core._to_simbad_format(ra, dec)
    assert (actual_ra, actual_dec) == (expected_ra, expected_dec)


# _get_frame_coords must report the SIMBAD frame tag and, for galactic
# input, round-trip l/b (modulo 360 on l).
@pytest.mark.parametrize(('coordinates', 'expected_frame'),
                         [(GALACTIC_COORDS, 'GAL'),
                          (ICRS_COORDS, 'ICRS'),
                          (FK4_COORDS, 'FK4'),
                          (FK5_COORDS, 'FK5')
                          ])
def test_get_frame_coordinates(coordinates, expected_frame):
    actual_frame = simbad.core._get_frame_coords(coordinates)[2]
    assert actual_frame == expected_frame
    if actual_frame == 'GAL':
        l, b = simbad.core._get_frame_coords(coordinates)[:2]
        np.testing.assert_almost_equal(float(l) % 360, -67.02084 % 360)
        np.testing.assert_almost_equal(float(b), -29.75447)
# _parse_result must build a Table from a good VOTable payload and raise
# TableParseError (with diagnostics stashed on the class) on a broken one.
def test_parse_result():
    result1 = simbad.core.Simbad._parse_result(
        MockResponseSimbad('query id '), simbad.core.SimbadVOTableResult)
    assert isinstance(result1, Table)
    with pytest.raises(TableParseError) as ex:
        simbad.core.Simbad._parse_result(MockResponseSimbad('query error '),
                                         simbad.core.SimbadVOTableResult)
    assert str(ex.value) == ('Failed to parse SIMBAD result! The raw response '
                             'can be found in self.last_response, and the '
                             'error in self.last_table_parse_error. '
                             'The attempted parsed result is in '
                             'self.last_parsed_result.\n Exception: 7:115: '
                             'no element found')
    assert isinstance(simbad.Simbad.last_response.text, six.string_types)
    assert isinstance(simbad.Simbad.last_response.content, six.binary_type)


# Current default fields, baked into the expected script below.
votable_fields = ",".join(simbad.core.Simbad.get_votable_fields())


# _args_to_payload must emit the exact SIMBAD script text, with and
# without the votable wrapper (get_raw=True skips it).
@pytest.mark.parametrize(('args', 'kwargs', 'expected_script'),
                         [(["m [0-9]"], dict(wildcard=True,
                                             caller='query_object_async'),
                           ("\nvotable {" + votable_fields + "}\n"
                            "votable open\n"
                            "query id wildcard m [0-9] \n"
                            "votable close"
                            )),
                          (["2006ApJ"], dict(caller='query_bibcode_async',
                                             get_raw=True),
                           ("\n\nquery bibcode 2006ApJ \n"))
                          ])
def test_args_to_payload(args, kwargs, expected_script):
    script = simbad.Simbad._args_to_payload(*args, **kwargs)['script']
    assert script == expected_script
# Malformed epoch/equinox strings must be rejected with ValueError.
@pytest.mark.parametrize(('epoch', 'equinox'),
                         [(2000, 'thousand'),
                          ('J-2000', None),
                          (None, '10e3b')
                          ])
def test_validation(epoch, equinox):
    with pytest.raises(ValueError):
        # only one of these has to raise an exception
        if equinox is not None:
            simbad.core.validate_equinox(equinox)
        if epoch is not None:
            simbad.core.validate_epoch(epoch)


# Class-level and instance-level async bibcode queries must agree.
@pytest.mark.parametrize(('bibcode', 'wildcard'),
                         [('2006ApJ*', True),
                          ('2005A&A.430.165F', None)
                          ])
def test_query_bibcode_async(patch_post, bibcode, wildcard):
    response1 = simbad.core.Simbad.query_bibcode_async(bibcode,
                                                       wildcard=wildcard)
    response2 = simbad.core.Simbad().query_bibcode_async(bibcode,
                                                         wildcard=wildcard)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# Synchronous bibcode query via the class singleton returns a Table.
def test_query_bibcode_class(patch_post):
    result1 = simbad.core.Simbad.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(result1, Table)


# ... and via a fresh instance as well.
def test_query_bibcode_instance(patch_post):
    S = simbad.core.Simbad()
    result2 = S.query_bibcode("2006ApJ*", wildcard=True)
    assert isinstance(result2, Table)
# objectids: class and instance async calls must return the same payload.
def test_query_objectids_async(patch_post):
    response1 = simbad.core.Simbad.query_objectids_async('Polaris')
    response2 = simbad.core.Simbad().query_objectids_async('Polaris')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# objectids: synchronous calls must parse into Tables.
def test_query_objectids(patch_post):
    result1 = simbad.core.Simbad.query_objectids('Polaris')
    result2 = simbad.core.Simbad().query_objectids('Polaris')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)


# bibobj: class and instance async calls must return the same payload.
def test_query_bibobj_async(patch_post):
    response1 = simbad.core.Simbad.query_bibobj_async('2005A&A.430.165F')
    response2 = simbad.core.Simbad().query_bibobj_async('2005A&A.430.165F')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# bibobj: synchronous calls must parse into Tables.
def test_query_bibobj(patch_post):
    result1 = simbad.core.Simbad.query_bibobj('2005A&A.430.165F')
    result2 = simbad.core.Simbad().query_bibobj('2005A&A.430.165F')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)


# catalog: class and instance async calls must return the same payload.
def test_query_catalog_async(patch_post):
    response1 = simbad.core.Simbad.query_catalog_async('m')
    response2 = simbad.core.Simbad().query_catalog_async('m')
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# catalog: synchronous calls must parse into Tables.
def test_query_catalog(patch_post):
    result1 = simbad.core.Simbad.query_catalog('m')
    result2 = simbad.core.Simbad().query_catalog('m')
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
# Region queries across all frames (and a multi-coordinate input) must
# produce identical payloads from class and instance calls.
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000'),
                          (multicoords, 0.5*u.arcsec, 2000.0, 'J2000'),
                          ])
def test_query_region_async(patch_post, coordinates, radius, equinox, epoch):
    response1 = simbad.core.Simbad.query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    response2 = simbad.core.Simbad().query_region_async(
        coordinates, radius=radius, equinox=equinox, epoch=epoch)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# Synchronous region queries must parse into Tables for every frame.
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, None, 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
                          (FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
                          (FK5_COORDS, None, 2000.0, 'J2000')
                          ])
def test_query_region(patch_post, coordinates, radius, equinox, epoch):
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)


# A unitless radius of 0 must be rejected with UnitsError.
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, 0, 2000.0, 'J2000')])
def test_query_region_radius_error(patch_post, coordinates, radius,
                                   equinox, epoch):
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad.query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)
    with pytest.raises(u.UnitsError):
        simbad.core.Simbad().query_region(
            coordinates, radius=radius, equinox=equinox, epoch=epoch)


# Zero/tiny (but unit-bearing) radii are valid and must still query.
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
                         [(ICRS_COORDS, "0d", 2000.0, 'J2000'),
                          (GALACTIC_COORDS, 1.0 * u.marcsec, 2000.0, 'J2000')
                          ])
def test_query_region_small_radius(patch_post, coordinates, radius,
                                   equinox, epoch):
    result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
                                              equinox=equinox, epoch=epoch)
    result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
                                                equinox=equinox, epoch=epoch)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)
# Object queries (plain and wildcard): class vs instance payloads agree.
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True)
                          ])
def test_query_object_async(patch_post, object_name, wildcard):
    response1 = simbad.core.Simbad.query_object_async(object_name,
                                                      wildcard=wildcard)
    response2 = simbad.core.Simbad().query_object_async(object_name,
                                                        wildcard=wildcard)
    assert response1 is not None and response2 is not None
    assert response1.content == response2.content


# Synchronous object queries must parse into Tables.
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True),
                          ])
def test_query_object(patch_post, object_name, wildcard):
    result1 = simbad.core.Simbad.query_object(object_name,
                                              wildcard=wildcard)
    result2 = simbad.core.Simbad().query_object(object_name,
                                                wildcard=wildcard)
    assert isinstance(result1, Table)
    assert isinstance(result2, Table)


# Smoke test: listing votable fields must not raise.
def test_list_votable_fields():
    simbad.core.Simbad.list_votable_fields()
    simbad.core.Simbad().list_votable_fields()


# Field descriptions work for known fields and raise for unknown ones.
def test_get_field_description():
    simbad.core.Simbad.get_field_description('bibcodelist(y1-y2)')
    simbad.core.Simbad().get_field_description('bibcodelist(y1-y2)')
    with pytest.raises(Exception):
        simbad.core.Simbad.get_field_description('xyz')
# Adding/removing votable fields: duplicates are rejected (KeyError) and
# leave state untouched; removals are idempotent; reset restores defaults.
def test_votable_fields():
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    try:
        simbad.core.Simbad.add_votable_fields('z')
    except KeyError:
        pass  # this is the expected response
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    # Removing already-removed fields is a no-op.
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    simbad.core.Simbad.remove_votable_fields('ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    simbad.core.Simbad.reset_votable_fields()
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))


# query_criteria must forward the raw criteria string into the script.
def test_query_criteria1(patch_post):
    Simbad = simbad.core.Simbad()
    result = Simbad.query_criteria(
        "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)", otype='HII')
    assert isinstance(result, Table)
    assert "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)" in Simbad._last_query.data['script']


# Keyword criteria (otype=...) must also appear in the submitted script,
# and per-instance field changes must not require positional criteria.
def test_query_criteria2(patch_post):
    S = simbad.core.Simbad()
    S.add_votable_fields('ra(d)', 'dec(d)')
    S.remove_votable_fields('coordinates')
    assert S.get_votable_fields() == ['main_id', 'ra(d)', 'dec(d)']
    result = S.query_criteria(otype='SNR')
    assert isinstance(result, Table)
    assert 'otype=SNR' in S._last_query.data['script']
# Parametrised field 'dec(5)' survives removal of plain 'dec' when
# strip_params is not set.
def test_simbad_settings1():
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates', 'dec(5)'])
    simbad.core.Simbad.reset_votable_fields()


# With strip_params=True, removing 'dec' also drops 'dec(5)'.
def test_simbad_settings2():
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']


# Regression: the same base field may be added with different parameters.
def test_regression_votablesettings():
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    # this is now allowed:
    simbad.core.Simbad.add_votable_fields('ra(d)', 'dec(d)')
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates',
                                                  'ra', 'dec(5)', 'ra(d)',
                                                  'dec(d)']
    # cleanup
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']


# Regression: several parametrised fluxdata(...) fields coexist, and one
# strip_params removal clears them all.
def test_regression_votablesettings2():
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('fluxdata(J)')
    simbad.core.Simbad.add_votable_fields('fluxdata(H)')
    simbad.core.Simbad.add_votable_fields('fluxdata(K)')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates',
             'fluxdata(J)', 'fluxdata(H)', 'fluxdata(K)'])
    simbad.core.Simbad.remove_votable_fields('fluxdata', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']


# Regression for issue #388: parse a canned bytes payload directly.
def test_regression_issue388():
    # This is a python-3 issue: content needs to be decoded?
    response = MockResponseSimbad('\nvotable {main_id,coordinates}\nvotable '
                                  'open\nquery id m1 \nvotable close')
    with open(data_path('m1.data'), "rb") as f:
        response.content = f.read()
    parsed_table = simbad.Simbad._parse_result(response,
                                               simbad.core.SimbadVOTableResult)
    assert parsed_table['MAIN_ID'][0] == b'M 1'
    assert len(parsed_table) == 1
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
2bfaceaec7ad594a098bc8fdcd309b8ee2a0c70d | 6e42b85d0deb68eeddf18bb1849daf0ee6fc0df1 | /main/tests/test_views.py | 978e99dee0b28789949a209162916459f411d351 | [] | no_license | NotSecretEmmet/TipsCalculator | 8980be28412cf4c7353d5a3a4260c19c436a9a85 | 5251d1ddeaf56e9f968e8d45f265c97ddd328698 | refs/heads/main | 2023-03-13T06:51:31.422859 | 2021-03-03T16:32:19 | 2021-03-03T16:32:19 | 316,184,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth.models import User
class TestViews(TestCase):
    """Smoke tests: each page renders (HTTP 200) for a logged-in user
    with the expected template."""

    def setUp(self):
        # Fresh client and a throwaway user for every test.
        self.client = Client()
        self.user = User.objects.create_user('johnlennon', 'lennon@thebeatles.com', 'johnpassword')

    def test_home_view_GET(self):
        """Home page responds 200 and uses main/home.html."""
        self.client.force_login(self.user)
        response = self.client.get(reverse('main-home'))
        self.assertEquals(response.status_code, 200)
        self.assertTemplateUsed(response, 'main/home.html')

    def test_faq_view_GET(self):
        """FAQ page responds 200 and uses main/faq.html."""
        self.client.force_login(self.user)
        response = self.client.get(reverse('main-faq'))
        self.assertEquals(response.status_code, 200)
        self.assertTemplateUsed(response, 'main/faq.html')
| [
"emmet@emkit.nl"
] | emmet@emkit.nl |
dc2a482aa68540bad4c20f140f3fc7b3df59ceef | 0712a5355fb7b2110df802f0630db3f421ffa08e | /MyoGrapher/__init__.py | 5637e8c629fbc81d87154322b678fdf91bbc7f9a | [
"MIT"
] | permissive | nullp0tr/MyoGrapher | 6f404bc29034ce585742b0eac7798981d82867ee | 153dc26a77585181e8a25f427c7c5493767e20ab | refs/heads/master | 2021-01-01T18:13:54.648687 | 2017-12-14T17:32:28 | 2017-12-14T17:32:28 | 98,281,130 | 1 | 1 | MIT | 2017-12-14T17:32:29 | 2017-07-25T08:06:36 | Python | UTF-8 | Python | false | false | 3,156 | py | import pygame
class MyoGrapher(object):
    """Scrolling pygame oscilloscope for Myo armband sensor streams."""

    def __init__(self, width=1200, height=400):
        # Window size in pixels; plots scroll right-to-left.
        self.width, self.height = width, height
        self.screen = pygame.display.set_mode((self.width, self.height))
        # Previous frame's values (per channel) so each new frame can draw
        # a segment from the last sample to the new one.
        self.last_values = []
        self.dlast_values = []  # same, but per series group for dplot()

    def dplot(self, vals, shifts, colors):
        """Draw one scrolling frame for several series groups at once.

        vals: list of per-group channel-value lists; shifts: per-group
        normalisation divisors; colors: per-group RGB line colours.
        NOTE(review): unlike plot(), this never calls
        pygame.display.flip(); presumably the caller presents the frame --
        confirm intended.
        """
        division_lines = 4  # number of horizontal channel lanes
        drift = 5           # pixels scrolled per frame
        self.screen.scroll(-drift)
        self.screen.fill((0, 0, 0),
                         (self.width - drift, 0, self.width, self.height))
        for n, values in enumerate(vals):
            values = [val / float(shifts[n]) for val in values]
            # Warm-up: remember values until every group has history, then
            # bail out of this frame entirely.
            if len(self.dlast_values) < len(vals):
                self.dlast_values.append(values)
                return
            for i, (u, v) in enumerate(zip(self.dlast_values[n], values)):
                # Coloured segment from the previous to the current sample.
                pygame.draw.line(self.screen, colors[n],
                                 (self.width - drift, int(
                                     self.height / division_lines * (
                                         i + 1 - u))),
                                 (self.width, int(
                                     self.height / division_lines * (
                                         i + 1 - v))))
                # White baseline for this lane.
                pygame.draw.line(self.screen, (255, 255, 255),
                                 (self.width - drift, int(
                                     self.height / division_lines * (
                                         i + 1))),
                                 (self.width, int(
                                     self.height / division_lines * (
                                         i + 1))))
            self.dlast_values[n] = values

    def plot(self, values, drawlines=False, curve=True):
        """Scroll the display and draw one frame of normalised *values*.

        drawlines=True draws green line segments over white baselines;
        otherwise each channel is a grey strip whose brightness tracks the
        value.  curve=False freezes the reference frame (segments keep
        starting from the same old values).
        """
        # NOTE(review): dead guard -- __init__ sets last_values to [] (never
        # None), so this branch cannot trigger; the first call simply draws
        # nothing because zip([], values) is empty.
        if self.last_values is None:
            self.last_values = values
            return
        division_lines = len(values)  # one lane per channel
        drift = 5
        self.screen.scroll(-drift)
        self.screen.fill((0, 0, 0), (self.width - drift, 0, self.width, self.height))
        for i, (u, v) in enumerate(zip(self.last_values, values)):
            if drawlines:
                # Green signal segment plus white lane baseline.
                pygame.draw.line(self.screen, (0, 255, 0),
                                 (self.width - drift, int(self.height / division_lines * (i + 1 - u))),
                                 (self.width, int(self.height / division_lines * (i + 1 - v))))
                pygame.draw.line(self.screen, (255, 255, 255),
                                 (self.width - drift, int(self.height / division_lines * (i + 1))),
                                 (self.width, int(self.height / division_lines * (i + 1))))
            else:
                # Brightness mode: value clamped to [0, 1] -> grey level.
                c = int(255 * max(0, min(1, v)))
                self.screen.fill((c, c, c), (self.width - drift, i * self.height / division_lines, drift,
                                             (i + 1) * self.height / division_lines - i * self.height / division_lines))
        if curve:
            self.last_values = values
        pygame.display.flip()

    def emg_plot(self, emg, shift=512, drawlines=False, curve=True):
        """Plot raw EMG samples, normalising by *shift* (default 512)."""
        self.plot([e / float(shift) for e in emg], drawlines=drawlines, curve=curve)
| [
"ahmeds2000x@gmail.com"
] | ahmeds2000x@gmail.com |
d3542bde55fffcec1c5d1a1f2685e6561647f06c | 616133580e0f01adaa6ac4117329e93e6f7ad931 | /Main.py | edb84ae88794d8a239d611b73066c6e7c6e636bc | [] | no_license | ckarnell/kings_cup_app | df09d0c94e9f288eee3df9da72f2b0f32dd52a98 | 9f1eb5ecf7ea6f1f8403887218f37c3d135150ec | refs/heads/master | 2021-01-19T11:26:25.571172 | 2017-02-17T01:33:35 | 2017-02-17T01:33:35 | 82,243,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from Tkinter import *
from time import sleep
from controller.deck import Deck
from static.static import Static
class App:
def __init__(self):
self.deck = Deck()
self.player = 0
self.root = Tk()
self.root.title('King\'s Cup')
# Pack the initial card image.
logo = PhotoImage(file="./static/gifs/K_S.gif")
self.card_image = Label(self.root, image=logo)
self.card_image.image = logo
self.card_image.pack(side='left')
explanation = "King's Cup is a drinking game!\nJust follow the instructions."
self.rule = Label(self.root,
width = 25,
justify=LEFT,
padx = 20,
text=explanation)
self.rule.pack(side="left")
self.button = Button(self.root,
text="Draw!",
command=lambda: self.change())
self.root.bind('<Return>', self.change)
self.button.pack(side='left')
self.root.mainloop()
def change(self, event=None):
card = self.deck.getCard()
# Set the card image.
card_image = PhotoImage(file='./static/gifs/%s_%s.gif' % (card['val'], card['suit']))
self.card_image.config(image=card_image)
self.card_image.image = card_image
self.rule.config(text = Static.cardRules[card['val']])
self.rule.text = Static.cardRules[card['val']]
if __name__ == '__main__':
App()
| [
"cohen.karnell@gmail.com"
] | cohen.karnell@gmail.com |
33a16862ec2f40db072c68c1e4c243096bce805a | abb614790bdf41c7db9d09dfdea4385f78c2be52 | /rtk-RQA/rtk/hardware/component/connection/Socket.py | c1454c5a9c43e324ac69b5e3c374fd2decff5864 | [
"BSD-3-Clause"
] | permissive | codacy-badger/rtk | f981bb75aadef6aaeb5a6fa427d0a3a158626a2a | bdb9392164b0b32b0da53f8632cbe6e3be808b12 | refs/heads/master | 2020-03-19T02:46:10.320241 | 2017-10-26T20:08:12 | 2017-10-26T20:08:12 | 135,659,105 | 0 | 0 | null | 2018-06-01T02:43:23 | 2018-06-01T02:43:23 | null | UTF-8 | Python | false | false | 5,321 | py | #!/usr/bin/env python
"""
######################################################
Hardware.Component.Connection Package IC Socket Module
######################################################
"""
# -*- coding: utf-8 -*-
#
# rtk.hardware.component.connection.Socket.py is part of the RTK
# Project
#
# All rights reserved.
import gettext
import locale
try:
import Configuration
import Utilities
from hardware.component.connection.Connection import Model as Connection
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
from rtk.hardware.component.connection.Connection import Model as \
Connection
__author__ = 'Andrew Rowland'
__email__ = 'andrew.rowland@reliaqual.com'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
# Add localization support.
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error: # pragma: no cover
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class Socket(Connection):
"""
The Socket connection data model contains the attributes and methods of an
IC socket connection component. The attributes of an IC socket connection
are:
:cvar int subcategory: the Connection subcategory.
:ivar float base_hr: the MIL-HDBK-217FN2 base/generic hazard rate.
:ivar str reason: the reason(s) the Connection is overstressed.
:ivar float piE: the MIL-HDBK-217FN2 operating environment factor.
Hazard Rate Models:
# MIL-HDBK-217FN2, section 15.3.
"""
# MIL-HDBK-217FN2 hazard rate calculation variables.
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
_piQ = [1.0, 2.0]
_piE = [1.0, 3.0, 14.0, 6.0, 18.0, 8.0, 12.0, 11.0, 13.0, 25.0, 0.5, 14.0,
36.0, 650.0]
_lambdab_count = [0.0019, 0.0058, 0.027, 0.012, 0.035, 0.015, 0.023, 0.021,
0.025, 0.048, 0.00097, 0.027, 0.070, 1.3]
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
subcategory = 74 # Subcategory ID in the common DB.
def __init__(self):
"""
Method to initialize a IC Socket connection data model instance.
"""
super(Socket, self).__init__()
# Define private dictionary attributes.
# Define private list attributes.
# Define private scalar attributes.
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.n_active_contacts = 0
self.piP = 0.0
self.base_hr = 0.00042
def set_attributes(self, values):
"""
Method to set the Multi-Pin Connection data model attributes.
:param tuple values: tuple of values to assign to the instance
attributes.
:return: (_code, _msg); the error code and error message.
:rtype: tuple
"""
_code = 0
_msg = ''
(_code, _msg) = Connection.set_attributes(self, values[:133])
try:
self.base_hr = 0.00042
self.piP = float(values[133])
self.n_active_contacts = int(values[134])
except IndexError as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Insufficient input values."
except(TypeError, ValueError) as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Converting one or more inputs to correct data type."
return(_code, _msg)
def get_attributes(self):
"""
Method to retrieve the current values of the Multi-Pin Connection data
model attributes.
:return: (n_active_contacts, piP)
:rtype: tuple
"""
_values = Connection.get_attributes(self)
_values = _values + (self.piP, self.n_active_contacts)
return _values
def calculate_part(self):
"""
Method to calculate the hazard rate for the Multi-Pin Connection data
model.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
from math import exp
self.hazard_rate_model = {}
if self.hazard_rate_type == 1:
self.hazard_rate_model['equation'] = 'lambdab * piQ'
# Quality factor.
self.piQ = self._piQ[self.quality - 1]
elif self.hazard_rate_type == 2:
self.hazard_rate_model['equation'] = 'lambdab * piE * piP'
# Active pins correction factor.
if self.n_active_contacts >= 2:
self.piP = exp(((self.n_active_contacts - 1) / 10.0)**0.51064)
else:
self.piP = 0.0
self.hazard_rate_model['piP'] = self.piP
# Environmental correction factor.
self.piE = self._piE[self.environment_active - 1]
return Connection.calculate_part(self)
| [
"arowland@localhost.localdomain"
] | arowland@localhost.localdomain |
e5a9e28f6005491c144002425c212dd0d5803423 | a2e11ec88ef3c83b9f07129e76a3681a676d164f | /sessionproject3/sessionproject3/wsgi.py | a7a02fa35437ef303c13922290ff105dce0051b2 | [] | no_license | qwertypool/lofo | dadd7cd5b149a3a200b7111d803b1d0195d76642 | 3bc7bd125e7ea5a67f51dd6dd654e38a5f218055 | refs/heads/master | 2022-05-18T09:31:11.456634 | 2020-04-18T14:47:44 | 2020-04-18T14:47:44 | 256,773,858 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for sessionproject3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sessionproject3.settings')
application = get_wsgi_application()
| [
"deepapandey364@gmail.com"
] | deepapandey364@gmail.com |
96d06f52d129d45476dbb90da29283a0859e2776 | f6ff601089f678fecbfa22a4d95c1de225bc34b5 | /code12.py | eea027367cb9c55e95dee5542ce2b7d981997d76 | [] | no_license | Kumar1998/github-upload | 94c1fb50dc1bce2c4b76d83c41be2e0ce57b7fa6 | ab264537200791c87ef6d505d90be0c0a952ceff | refs/heads/master | 2021-07-05T10:14:13.591139 | 2020-07-26T15:47:30 | 2020-07-26T15:47:30 | 143,553,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | x=int(input("Enter 1st number:"))
y=int(input("Enter 2nd number:"))
sum=x+y
average=sum/2
print("Sum of the given two numbers is:",sum)
print("Average of the given two numbers is:",average) | [
"noreply@github.com"
] | Kumar1998.noreply@github.com |
105ba1f6775de7f1b066de7bcf5b3977007dfca9 | 851763767750eea0565b46a339cee37c1273b457 | /Interview Questions/LinkedList/IntersectionPointTwoLinkedList.py | ccd068dd9751c34704630b385017c8ae544406f6 | [] | no_license | sunamya/Data-Structures-in-Python | f076e4b2febe24fee31b05b83574e4e1f344014e | 6c3eec7a4184b93bb18f54071bc0232cb0a76a08 | refs/heads/main | 2023-06-21T02:54:23.652347 | 2021-07-18T16:35:12 | 2021-07-18T16:35:12 | 379,672,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | #User function Template for python3
'''
Function to return the value at point of intersection
in two linked list, connected in y shaped form.
Function Arguments: head_a, head_b (heads of both the lists)
Return Type: value in NODE present at the point of intersection
or -1 if no common point.
Contributed By: Nagendra Jha
{
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
}
'''
def size(node):
cnt=0
while node:
cnt+=1
node=node.next
return cnt
def npo(head1,head2,d):
for i in range(d):
head1=head1.next
while head1 and head2:
if head1==head2:
return head1.data
head1=head1.next
head2=head2.next
return None
#Function to find intersection point in Y shaped Linked Lists.
def intersetPoint(head1,head2):
#code here
diff=size(head1)-size(head2)
if diff<0: #Second list is bigger
return npo(head2,head1,diff)
else:
return npo(head1,head2,diff)
#Another approach using hasking
#Function to find intersection point in Y shaped Linked Lists.
def intersetPoint(head1,head2):
nodes=set()
#code here
while head1:
nodes.add(head1)
head1 = head1.next
# now traverse the second list and find the first node that is
# already present in the set
while head2:
# return the current node if it is found in the set
if head2 in nodes:
return head2.data
head2=head2.next
# we reach here if lists do not intersect
return None
#{
# Driver Code Starts
#Initial Template for Python 3
#Contributed by : Nagendra Jha
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
temp=None
# creates a new node with given value and appends it at the end of the linked list
def append(self, new_node):
if self.head is None:
self.head = new_node
self.temp = self.head
return
else:
self.temp.next = new_node
self.temp = self.temp.next
if __name__ == '__main__':
t=int(input())
for cases in range(t):
x,y,z = map(int,input().strip().split())
a = LinkedList() # create a new linked list 'a'.
b = LinkedList() # create a new linked list 'b'.
nodes_a = list(map(int, input().strip().split()))
nodes_b = list(map(int, input().strip().split()))
nodes_common = list(map(int, input().strip().split()))
for x in nodes_a:
node=Node(x)
a.append(node) # add to the end of the list
for x in nodes_b:
node=Node(x)
b.append(node) # add to the end of the list
for i in range(len(nodes_common)):
node=Node(nodes_common[i])
a.append(node) # add to the end of the list a
if i== 0:
b.append(node) # add to the end of the list b, only the intersection
print(intersetPoint(a.head,b.head))
# } Driver Code Ends | [
"sunamyagupta@gmail.com"
] | sunamyagupta@gmail.com |
ce51ad1ecc38aea688ceb967a158f3a5b6e99f01 | ed12b8d91b207d4bb5cd5bf114184e08c4a9237c | /pe033.py | bab1aa10b62766d72e4407e8447b1d4db87c8972 | [] | no_license | Rynant/project-euler | 544b4b48dda63913abf7d61201fe3ea0961b118f | c19090a6e0e8db3422c47dcce0fb886840493428 | refs/heads/master | 2021-01-10T20:43:16.782124 | 2014-06-11T18:11:28 | 2014-06-11T18:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | from primes import gcd
def answer():
numerator = denominator = 1
for i in range(10, 100):
if not i % 10: continue
for j in range(i+1, 100):
if not j % 10: continue
k, l = str(i), str(j)
if(k.find(l[0]) >= 0):
k, l = float(k[(k.find(l[0])+1)%2]), float(l[1])
elif(k.find(l[1]) >= 0):
k, l = float(k[(k.find(l[1])+1)%2]), float(l[0])
else: continue
if(i / j == k / l):
numerator *= k
denominator *= l
return denominator / gcd(numerator, denominator)
if __name__=='__main__':
print(answer()) | [
"rgrant@garnethill.com"
] | rgrant@garnethill.com |
90e46fcc82a3f160f8cd2bcfbc49f9442619ab7d | 10b43efca8647c86ac0ea9df1dd8368db5dff931 | /gen_data.py | 0d5b03686f6b71f86f251a59605ce223d2bdd756 | [
"MIT"
] | permissive | ruiyangio/latency-graph | ed0244c87d9b6d3d3bff7fa4aaaca1a07f9e40e2 | ba0414b11c31f565a5ca41b29e1d0aad9e545aa2 | refs/heads/master | 2020-03-25T03:36:41.982072 | 2018-08-03T04:20:27 | 2018-08-03T04:20:27 | 143,350,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import csv
import random
import numpy as np
import string
nodes = []
for i in range(120):
nodes.append(''.join((random.choice(string.ascii_uppercase), random.choice(string.ascii_uppercase), random.choice(string.digits))))
edges = []
for i in range(15000):
edges.append( random.choice(nodes) + "\t" + random.choice(nodes) + "\t" + str(np.random.uniform(400)) + "\n" )
with open('data.tsv', 'w') as file:
for edge in edges:
file.write(edge) | [
"ruiyangwind@gmail.com"
] | ruiyangwind@gmail.com |
a0e34d34734d4acd75e8ed1f3ea57119148e7c08 | 2b5cb00bda71b5e76843baa84a9ce1ca6be9e13b | /clustering/acquire_zillow.py | 930e20b94a5ce08c8b410da6e0c4e28d9207feef | [] | no_license | CodyBrettWatson/ds-methodologies-exercises | 07e851b2d08c6c889db4bd849d2d12cc4cc97ecc | 902e880b31d2b76eedca8d80ff0de9e0aa0dcd0f | refs/heads/master | 2020-05-02T15:29:39.784446 | 2019-05-20T12:58:06 | 2019-05-20T12:58:06 | 178,042,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,613 | py | ##########################################
## WILL NEED TO PIPE ALL THESE FUNCTIONS##
##########################################
# Getting data from SQL databases
from env import host, user, password
import pandas as pd
from sqlalchemy import create_engine
def get_db_url(
host: str, user: str, password: str, db_name: str
) -> str:
"""
return url for accessing a mysql database
"""
return f"mysql+pymysql://{user}:{password}@{host}/{db_name}"
def get_sql_conn(host: str, user: str, password: str, db_name: str):
"""
return a mysql connection object
"""
return create_engine(get_db_url(host, user, password, db_name))
def df_from_sql(query: str, url: str) -> pd.DataFrame:
"""
return a Pandas DataFrame resulting from a sql query
"""
return pd.read_sql(query, url)
def get_zillow_data() -> pd.DataFrame:
idb = "zillow"
query = ("SELECT * "
"FROM properties_2016 "
"JOIN properties_2017 USING(parcelid);")
url = get_db_url(host, user, password, idb)
return df_from_sql(query, url)
def get_2016_zillow():
idb = "zillow"
query = ('\
SELECT p16.*, pred16.logerror, act.airconditioningdesc, ast.architecturalstyledesc, \
bct.buildingclassdesc, hst.heatingorsystemdesc, plut.propertylandusedesc, \
st.storydesc, tct.typeconstructiondesc FROM properties_2016 p16 \
JOIN predictions_2016 pred16 \
ON pred16.parcelid = p16.parcelid \
LEFT JOIN airconditioningtype act \
ON p16.airconditioningtypeid = act.airconditioningtypeid\
LEFT JOIN architecturalstyletype ast \
ON p16.architecturalstyletypeid = ast.architecturalstyletypeid\
LEFT JOIN buildingclasstype bct \
ON p16.buildingclasstypeid = bct.buildingclasstypeid\
LEFT JOIN heatingorsystemtype hst \
ON p16.heatingorsystemtypeid = hst.heatingorsystemtypeid\
LEFT JOIN propertylandusetype plut \
ON p16.propertylandusetypeid = plut.propertylandusetypeid\
LEFT JOIN storytype st \
ON p16.storytypeid = st.storytypeid\
LEFT JOIN typeconstructiontype tct \
ON p16.typeconstructiontypeid = tct.typeconstructiontypeid;')
url = get_db_url(host, user, password, idb)
return df_from_sql(query, url)
def get_2017_zillow():
idb = "zillow"
query = ('\
SELECT p17.*, pred17.logerror, act.airconditioningdesc, ast.architecturalstyledesc, \
bct.buildingclassdesc, hst.heatingorsystemdesc, plut.propertylandusedesc, \
st.storydesc, tct.typeconstructiondesc FROM properties_2017 p17 \
JOIN predictions_2017 pred17 \
ON pred17.parcelid = p17.parcelid \
LEFT JOIN airconditioningtype act \
ON p17.airconditioningtypeid = act.airconditioningtypeid\
LEFT JOIN architecturalstyletype ast \
ON p17.architecturalstyletypeid = ast.architecturalstyletypeid\
LEFT JOIN buildingclasstype bct \
ON p17.buildingclasstypeid = bct.buildingclasstypeid\
LEFT JOIN heatingorsystemtype hst \
ON p17.heatingorsystemtypeid = hst.heatingorsystemtypeid\
LEFT JOIN propertylandusetype plut \
ON p17.propertylandusetypeid = plut.propertylandusetypeid\
LEFT JOIN storytype st \
ON p17.storytypeid = st.storytypeid\
LEFT JOIN typeconstructiontype tct \
ON p17.typeconstructiontypeid = tct.typeconstructiontypeid;')
url = get_db_url(host, user, password, idb)
return df_from_sql(query, url)
def merge_dfs():
df16 = get_2016_zillow()
df17 = get_2017_zillow()
df = pd.concat([df16, df17])
return df
def turn_to_csv():
df = merge_dfs()
df.to_csv('zillow_16_17.csv', sep='\t', index=False)
def drop_columns(df):
df = df.drop(columns=(['id',
'airconditioningtypeid',
'architecturalstyletypeid',
'buildingclasstypeid',
'buildingqualitytypeid',
'decktypeid',
'heatingorsystemtypeid',
'propertylandusetypeid',
'storytypeid',
'typeconstructiontypeid']))
return df
def reindex_df (df):
df = df.reindex(columns=[
'parcelid','logerror',
'bathroomcnt','bedroomcnt','calculatedbathnbr','fullbathcnt','roomcnt',
'calculatedfinishedsquarefeet','lotsizesquarefeet',
'unitcnt','propertylandusedesc','propertycountylandusecode','propertyzoningdesc',
'latitude','longitude','regionidcity','regionidcounty','fips','regionidneighborhood','regionidzip',
'yearbuilt',
'structuretaxvaluedollarcnt','taxvaluedollarcnt','landtaxvaluedollarcnt','taxamount','assessmentyear',
'rawcensustractandblock','censustractandblock',
'airconditioningdesc','heatingorsystemdesc',
'garagecarcnt','garagetotalsqft',
'basementsqft',
'finishedfloor1squarefeet','finishedsquarefeet12','finishedsquarefeet13',
'finishedsquarefeet15','finishedsquarefeet50','finishedsquarefeet6',
'fireplacecnt','hashottuborspa',
'poolcnt','poolsizesum','pooltypeid10','pooltypeid2','pooltypeid7',
'threequarterbathnbr',
'yardbuildingsqft17','yardbuildingsqft26',
'fireplaceflag',
'taxdelinquencyflag','taxdelinquencyyear',
'architecturalstyledesc',
'buildingclassdesc',
'numberofstories',
'storydesc',
'typeconstructiondesc',
])
return df
| [
"codywatson@codys-MacBook-Pro.local"
] | codywatson@codys-MacBook-Pro.local |
82a251d9f29c640dcd3ba0e0881292074c885c57 | caeedf133282db88bb11d5a4ae6fb20fee609103 | /AdvpythonDay3/demomutliprocessing/pshttpclient.py | d97fe5bdfba00f99929584fd95bea398c6a04b01 | [] | no_license | Shital-andhalkar/Advance_python_course | 81190bfdaaf6b4da1f43b592ebe496f65461a05c | d49e25efe52ae13713108572493f15cb4d96ea9d | refs/heads/master | 2020-04-03T20:36:25.822808 | 2018-11-01T07:07:33 | 2018-11-01T07:07:33 | 155,551,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import requests
import multiprocessing
from requests.exceptions import ConnectionError
def web_crawler(q):
""""""
try:
p_name=multiprocessing.current_process().name
url = q.get()
payload=requests.get(url).content
print("{}:{}:{}".format(p_name,url,payload[:128]))
except ConnectionError as err:
print(err)
def main():
"""parent process"""
queue_obj= multiprocessing.Queue()
urls=['http://python.org','http://linux.org', 'http://kernel.org/', 'http://google.com']
for url in urls:
child=multiprocessing.Process(target=web_crawler,args=(queue_obj,))
child.start()
for url in urls:
queue_obj.put(url)#add urls in to queue
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Shital-andhalkar.noreply@github.com |
f407b06b2595c41745867f79d0c2bf69dedc166e | 638fa52ac8fc9439f3ad06682c98c21646baf317 | /LatinGlyphs.py | 30d438949c7076016bea617653261187307fa34f | [
"Apache-2.0"
] | permissive | DunwichType/mixer | 68c1965de0a30b34ce9e852fe9e7323e9cb2f6eb | 7b82ba851fdb5b3aa26092d5d54d5e5e47f9b8a1 | refs/heads/master | 2021-01-10T17:10:20.760847 | 2015-10-19T22:30:52 | 2015-10-19T22:30:52 | 44,567,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,631 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Lists of Latin Glyphs for use with Mixer
# Basic Latin Alphabet
majbasic = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
minbasic = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
allbasic = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# DTF Latin Character Set
majuscules = [u'A', u'À', u'Á', u'Â', u'Ã', u'Ä', u'Å', u'Æ', u'Ā', u'Ă', u'Ą', u'Æ', u'Ǽ', u'Z', u'B', u'C', u'Ç', u'Ć', u'Ĉ', u'Ċ', u'Č', u'D', u'Ď', u'Đ', u'E', u'È', u'É', u'Ê', u'Ë', u'Ē', u'Ĕ', u'Ė', u'Ę', u'Ě', u'F', u'G', u'Ĝ', u'Ğ', u'Ġ', u'Ģ', u'H', u'Ĥ', u'Ħ', u'I', u'Ì', u'Í', u'Î', u'Ï', u'Ĩ', u'Ī', u'Ĭ', u'Į', u'J', u'Ĵ', u'K', u'Ķ', u'L', u'Ĺ', u'Ļ', u'Ľ', u'Ł', u'Ŀ', u'M', u'N', u'Ń', u'Ņ', u'Ň', u'Ŋ', u'Ñ', u'Ò', u'Ó', u'Ô', u'Õ', u'Ö', u'Ō', u'Ŏ', u'Ő', u'Ø', u'Ǿ', u'Œ', u'P', u'Q', u'Þ', u'R', u'Ŕ', u'Ř', u'Ŗ', u'S', u'Ś', u'Ŝ', u'Ş', u'Š', u'Ș', u'T', u'Ţ', u'Ť', u'Ŧ', u'Ț', u'U', u'Ù', u'Ú', u'Û', u'Ü', u'Ũ', u'Ū', u'Ŭ', u'Ů', u'Ű', u'Ų', u'V', u'W', u'Ŵ', u'Ẁ', u'Ẃ', u'Ẅ', u'X', u'Y', u'Ý', u'Ŷ', u'Ÿ', u'Z', u'Ź', u'Ż', u'Ž']
minuscules = [u'a', u'à', u'á', u'â', u'ã', u'ä', u'å', u'æ', u'ā', u'ă', u'ą', u'æ', u'ǽ', u'b', u'v', u'ç', u'ć', u'ĉ', u'ċ', u'č', u'd', u'ď', u'đ', u'e', u'è', u'é', u'ê', u'ë', u'ē', u'ĕ', u'ė', u'ę', u'ě', u'f', u'g', u'ĝ', u'ğ', u'ġ', u'ģ', u'h', u'ĥ', u'ħ', u'i', u'ì', u'í', u'î', u'ï', u'ĩ', u'ī', u'ĭ', u'į', u'j', u'ĵ', u'k', u'ķ', u'l', u'ĺ', u'ļ', u'ľ', u'ł', u'ŀ', u'm', u'n', u'ń', u'ņ', u'ň', u'ŋ', u'ñ', u'o', u'ò', u'ó', u'ô', u'õ', u'ö', u'ō', u'ŏ', u'ő', u'ø', u'ǿ', u'œ', u'p', u'þ', u'q', u'r', u'ŕ', u'ř', u'ŗ', u's', u'ś', u'ŝ', u'ş', u'š', u'ș', u'ß', u't', u'ţ', u'ť', u'ŧ', u'ț', u'u', u'ù', u'ú', u'û', u'ü', u'ũ', u'ū', u'ŭ', u'ů', u'ű', u'ų', u'v', u'w', u'ŵ', u'ẁ', u'ẃ', u'ẅ', u'x', u'y', u'ý', u'ŷ', u'ÿ', u'z', u'ź', u'ż', u'ž']
# Punctuation
basicpunct = [u'.', u',', u'\"', u'!', u'?', u'&']
punct = [u'.', u'…', u',', u':', u';', u'?', u'¿', u'!', u'¡', u'\'', u'\"', u'‘', u'’', u'‚', u'“', u'”', u'„', u'‹', u'›', u'«', u'»', u'-', u'–', u'—', u'_', u'†', u'‡', u'•', u'*', u'©', u'®', u'™', u'@', u'¶', u'(', u')', u'[', u']', u'{', u'}', u'/', u'\\', u'|']
# Numbers
currency = [u'#', u'%', u'&', u'¢', u'$', u'£', u'¥', u'ƒ', u'€']
numerals = [u'0', u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9']
prebuilt = [u'½', u'¼', u'¾', u'⅓', u'⅔', u'⅛', u'⅜', u'⅝']
math = [u'<', u'+', u'−', u'=', u'÷', u'×', u'>', u'±', u'^', u'~', u'|', u'¦', u'§', u'°', u'ª', u'º', u'%']
fractions = [u'½', u'¼', u'¾', u'⅓', u'⅔', u'⅛', u'⅜', u'⅝']
# Prototyping
adhesion = [u'a', u'd', u'h', u'e', u's', u'i', u'o', u'n']
ADHESION = [u'A', u'D', u'H', u'E', u'S', u'I', u'O', u'N']
handgloves = [u'h', u'a', u'n', u'd', u'g', u'l', u'o', u'v', u'e', u's']
HANDGLOVES = [u'H', u'A', u'N', u'D', u'G', u'L', u'O', u'V', u'E', u'S']
hamburgefontivs = [u'h', u'a', u'm', u'b', u'u', u'r', u'g', u'e', u'f', u'o', u'n', u't', u'i', u'v', u's']
HAMBURGEFONTIVS = [u'H', u'A', u'M', u'B', u'U', u'R', u'G', u'E', u'F', u'O', u'N', u'T', u'I', u'V', u'S']
#Latin Extended B
majLatinXB = [u'Ɓ', u'Ƃ', u'Ƅ', u'Ɔ', u'Ƈ', u'Ɖ', u'Ɗ', u'Ƌ', u'Ǝ', u'Ə', u'Ɛ', u'Ƒ', u'Ɠ', u'Ɣ', u'Ɩ', u'Ɨ', u'Ƙ', u'Ɯ', u'Ɲ', u'Ɵ', u'Ơ', u'Ƣ', u'Ƥ', u'Ʀ', u'Ƨ', u'Ʃ', u'ƪ', u'Ƭ', u'Ʈ', u'Ư', u'Ʊ', u'Ʋ', u'Ƴ', u'Ƶ', u'Ʒ', u'Ƹ', u'ƻ', u'Ƽ', u'ǀ', u'ǁ', u'ǂ', u'ǃ', u'DŽ', u'Dž', u'LJ', u'Lj', u'NJ', u'Nj', u'Ǎ', u'Ǐ', u'Ǒ', u'Ǔ', u'Ǖ', u'Ǘ', u'Ǚ', u'Ǜ', u'Ǟ', u'Ǡ', u'Ǣ', u'Ǥ', u'Ǧ', u'Ǩ', u'Ǫ', u'Ǭ', u'Ǯ', u'DZ', u'Dz', u'Ǵ', u'Ƕ', u'Ƿ', u'Ǹ', u'Ǻ', u'Ǽ', u'Ȁ', u'Ȃ', u'Ȅ', u'Ȇ', u'Ȉ', u'Ȋ', u'Ȍ', u'Ȏ', u'Ȑ', u'Ȓ', u'Ȕ', u'Ȗ', u'Ș', u'Ț', u'Ȝ', u'Ȟ', u'Ƞ', u'Ȣ', u'Ȥ', u'Ȧ', u'Ȩ', u'Ȫ', u'Ȭ', u'Ȯ', u'Ȱ', u'Ȳ', u'Ⱥ', u'Ȼ', u'Ƚ', u'Ⱦ', u'Ɂ', u'Ƀ', u'Ʉ', u'Ʌ', u'Ɇ', u'Ɉ', u'Ɋ', u'Ɍ', u'Ɏ']
minusLatinXB = [u'ƀ', u'ƃ', u'ƅ', u'ƈ', u'ƌ', u'ƍ', u'ƕ', u'ƙ', u'ƚ', u'ƛ', u'ơ', u'ƣ', u'ƥ', u'ƨ', u'ƫ', u'ƭ', u'ư', u'ƴ', u'ƶ', u'ƹ', u'ƺ', u'ƽ', u'ƾ', u'ƿ', u'dž', u'lj', u'nj', u'ǎ', u'ǐ', u'ǒ', u'ǔ', u'ǖ', u'ǘ', u'ǚ', u'ǜ', u'ǝ', u'ǟ', u'ǡ', u'ǣ', u'ǥ', u'ǧ', u'ǩ', u'ǫ', u'ǭ', u'ǯ', u'dz', u'ǵ', u'ǹ', u'ǻ', u'ǽ', u'ȁ', u'ȃ', u'ȅ', u'ȇ', u'ȉ', u'ȋ', u'ȍ', u'ȏ', u'ȑ', u'ȓ', u'ȕ', u'ȗ', u'ș', u'ț', u'ȝ', u'ȟ', u'ȡ', u'ȣ', u'ȥ', u'ȧ', u'ȩ', u'ȫ', u'ȭ', u'ȯ', u'ȱ', u'ȳ', u'ȴ', u'ȵ', u'ȶ', u'ȷ', u'ȸ', u'ȹ', u'ȼ', u'ȿ', u'ɀ', u'ɂ', u'ɇ', u'ɉ', u'ɋ', u'ɍ', u'ɏ']
#Control Characters
lc_control = [u'anon ', u'bnon ', u'cnon ', u'dnon ', u'enon ', u'fnon ', u'gnon ', u'hnon ', u'inon ', u'jnon ', u'knon ', u'lnon ', u'mnon ', u'nnon ', u'onon ', u'pnon ', u'qnon ', u'rnon ', u'snon ', u'tnon ', u'unon ', u'vnon ', u'wnon ', u'xnon ', u'ynon ', u'znon ']
controls = [u'H', u'O', u'h', u'n', u'o']
majcontrols = [u'H', u'O']
mincontrols = [u'h', u'o', u'n']
figcontrols = [u'0', u'1'] | [
"junker@dunwichtype.com"
] | junker@dunwichtype.com |
0baadeafe82ed3f2330579af9aeb7806db738dc3 | 7f8c24fe161fee3f32e206e013ea89fc8eb9a50a | /example_api/urls.py | 4c07dd5d1421c42a6038b536a60b6f7e7826f9cc | [] | no_license | vnitikesh/rest-registration | a04f4cf643766d3844e7a63e0616157d1c1f1e9a | 0578589f6cb9b9138fa5915395bf616de57eaf0b | refs/heads/main | 2023-02-18T12:32:40.392439 | 2021-01-21T23:55:23 | 2021-01-21T23:55:23 | 331,453,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.urls import path
from . import views
from rest_framework.routers import DefaultRouter
urlpatterns = [
path('category/', views.CategoryListView.as_view(), name = 'category-list'),
path('category/<int:pk>/', views.CategoryDetailView.as_view(), name = 'category-detail'),
path('product/', views.ProductRecordView.as_view(), name = 'product-list'),
path('cart/', views.CartViewSet.as_view(), name = 'cart'),
path('checkout/', views.CheckoutView.as_view(), name = 'checkout'),
#path('order/', views.OrderViewSet.as_view(), name = 'order')
]
| [
"vnitikesh@gmail.com"
] | vnitikesh@gmail.com |
42cb4acff470deea4ca9e3b4cc937a546a964c39 | dc75ed733ecd023aebc2989c1f956ca575bd9e14 | /load_testing/mixed_tomcat.py | 2828e6f44380488c2e1649f73646d64b6a838316 | [] | no_license | deven810/Web-Projects | fd5e9f1e5fd9c348f4731219052e6f680638e311 | 4e8cc3f889d95bf3a37513291da50bae7a704918 | refs/heads/master | 2020-04-17T04:55:20.054883 | 2018-12-05T22:38:29 | 2018-12-05T22:38:29 | 166,253,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # mixed_tomcat.py
import sys, random
from locust import HttpLocust, TaskSet
def readRequest(locust):
postid = random.randint(1, 500)
locust.client.get('/editor/post?action=open&username=cs144&postid='+str(postid), name='/editor/post?action=open')
def writeRequest(locust):
postid = random.randint(1, 500)
locust.client.post('/editor/post?action=open&username=cs144&postid='+str(postid)+'&title=Loading%20Test&body=***Hello%20World!***',
name='/editor/post?action=save')
class MyTaskSet(TaskSet):
""" the class MyTaskSet inherits from the class TaskSet, defining the behavior of the user """
tasks = {writeRequest:1, readRequest:9}
class MyLocust(HttpLocust):
""" the class MyLocust inherits from the class HttpLocust, representing an HTTP user """
task_set = MyTaskSet
min_wait = 1000
max_wait = 2000 | [
"devenagrawal.810@gmail.com"
] | devenagrawal.810@gmail.com |
6eb0d30982c51c95fe8b185a70ce7a5e912cdd20 | 2da72c9f9bbb0b5db33710cddbdee28503e5a606 | /UCI/pyQT-matplot-example 2.py | 0228e2bce7c9d982c2ca7970f732c4860c0e6cc5 | [] | no_license | gddickinson/python_code | 2e71fb22b929cb26c2a1456b11dc515af048c441 | dbb20e171fb556e122350fb40e12cc76adbb9a66 | refs/heads/master | 2022-10-26T15:20:40.709820 | 2022-10-11T16:06:27 | 2022-10-11T16:06:27 | 44,060,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 16:50:19 2015
@author: George
"""
import sys
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib import NavigationToolbar2QTAgg as NavigationToolbar
import matplotlib.pyplot as plt
import random
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget
# it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# Just some button connected to `plot` method
self.button = QtGui.QPushButton('Plot')
self.button.clicked.connect(self.plot)
# set the layout
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.button)
self.setLayout(layout)
def plot(self):
''' plot some random stuff '''
# random data
data = [random.random() for i in range(10)]
# create an axis
ax = self.figure.add_subplot(111)
# discards the old graph
ax.hold(False)
# plot data
ax.plot(data, '*-')
# refresh canvas
self.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
main = Window()
main.show()
sys.exit(app.exec_()) | [
"george.dickinson@gmail.com"
] | george.dickinson@gmail.com |
4212426d83cef5a31b6993b1859aa096f5a86957 | c7bb490ef96fda51a946478a4f584814e1665a6a | /backend/urls.py | 06c33f1ea3c2e43ed3c886400d353b67ec87d687 | [] | no_license | pawanpaudel93/motion-planning-dashboard | e70acc9737cdedf0fd0beac0a0700cc88f9c2559 | 642f5955d518747dfc14f1f22a93ef20784329d8 | refs/heads/master | 2023-03-11T14:33:31.643898 | 2021-02-28T11:26:16 | 2021-02-28T11:26:16 | 340,398,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """MPD URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework import routers
from .api import urls as api_urls
from .api.views import index_view
router = routers.DefaultRouter()
urlpatterns = [
path('api/v1/', include(api_urls)),
path('admin/', admin.site.urls),
re_path(r'^.*$', index_view, name='index')
]
| [
"pawanpaudel93@gmail.com"
] | pawanpaudel93@gmail.com |
4f2d80280f1710eb34ef81a47bbccef522f62c15 | af5f6d99a23711725ccf0431a62ca37b96acccf3 | /manage.py | 0b1d71a459fa7ee0fc39cd93cffd8801fdc5a6cf | [
"MIT"
] | permissive | nimowairimu/Django-IP1 | daa58f53c1d94e7d1d8dcbbead081be506c75343 | 0def901a0a2f34f644ed42bd4d1c8f883743cffe | refs/heads/main | 2023-03-23T17:21:50.203551 | 2021-03-23T17:23:30 | 2021-03-23T17:23:30 | 348,981,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vetdaily.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"nimowairimu25@gmail.com"
] | nimowairimu25@gmail.com |
a90e5a8ba7a2476925946904b9c73c06d09cfda9 | e7c8bcd6156956123c0ffcd1e3603e8b0ba0fcf8 | /tickTacToe.py | 0da1e458ef3a0aadf53bbbd2573513c697c80919 | [] | no_license | flow0787/python | 421efa9c3541d618778e2f7cd3f3aceed946ee6e | 38a995568c10ff31c516d85877cb9b0ad9596a3a | refs/heads/master | 2020-04-06T06:55:46.278080 | 2020-02-12T08:32:32 | 2020-02-12T08:32:32 | 63,419,040 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | theBoard = {"topL": " ", "topM": " ", "topR": " ",
"midL": " ", "midM": " ", "midR": " ",
"lowL": " ", "lowM": " ", "lowR": " "}
def printBoard(board):
print(board['topL'] + '|' + board['topM'] + '|' + board['topR'])
print('-+-+-')
print(board['midL'] + '|' + board['midM'] + '|' + board['midR'])
print('-+-+-')
print(board['lowL'] + '|' + board['lowM'] + '|' + board['lowR'])
turn = "X"
for i in range(9):
printBoard(theBoard)
move = input("Turn for " + turn + ". Move on which space?")
theBoard[move] = turn
if turn == 'X':
turn = "O"
else:
turn = "X"
printBoard(theBoard) | [
"badeaflorien@gmail.com"
] | badeaflorien@gmail.com |
e8c4c60a57463e9f15f1b88dd4eda1629eea2dfc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /2JHYavYqynX8ZCmMG_5.py | f3bd9ad800ee0f88625397c941672c01b7288b50 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py |
def ascii_sort(lst):
if sum([ord(x) for x in lst[0]]) <= sum([ord(x) for x in lst[1]]):
return lst[0]
return lst[1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
258e2deac675e627b1e12054d8f0b720e887f41f | aa0fc44d694f2b971bbda827c755296faa44d86f | /test/py2neo/index_test.py | f253623c2f494e062c1e5909427d941f1080a0bb | [
"Apache-2.0"
] | permissive | fugu13/py2neo | 8f6a5065883c7eb96bb0d32c45bce2a9533d19a5 | d3fa87199b51b554d1d04c7334d1bc7b887f7273 | refs/heads/master | 2021-01-17T04:52:58.719996 | 2012-08-01T20:44:43 | 2012-08-01T20:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,211 | py | #/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
PY3K = sys.version_info[0] >= 3
__author__ = "Nigel Small <py2neo@nigelsmall.org>"
__copyright__ = "Copyright 2011 Nigel Small"
__license__ = "Apache License, Version 2.0"
from py2neo import neo4j
import unittest
def default_graph_db():
return neo4j.GraphDatabaseService("http://localhost:7474/db/data/")
class NodeIndexTestCase(unittest.TestCase):
def setUp(self):
self.graph_db = default_graph_db()
def test_get_node_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
self.assertIsNotNone(index1)
self.assertEqual("index1", index1.name)
self.assertEqual(neo4j.Node, index1.content_type)
def test_add_node_to_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, = self.graph_db.create({"name": "Alice Smith"})
index1.add("surname", "Smith", alice)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_node_to_index_with_spaces(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("family name", "von Schmidt")
alice, = self.graph_db.create({"name": "Alice von Schmidt"})
index1.add("family name", "von Schmidt", alice)
entities = index1.get("family name", "von Schmidt")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_node_to_index_with_odd_chars(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("@!%#", "!\"£$%^&*()")
alice = self.graph_db.create_node({"name": "Alice Smith"})
index1.add("@!%#", "!\"£$%^&*()", alice)
entities = index1.get("@!%#", "!\"£$%^&*()")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_add_multiple_nodes_to_index(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, bob, carol = self.graph_db.create(
{"name": "Alice Smith"},
{"name": "Bob Smith"},
{"name": "Carol Smith"}
)
index1.add("surname", "Smith", alice, bob, carol)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(3, len(entities))
for entity in entities:
self.assertTrue(entity in (alice, bob, carol))
def test_get_or_create_node(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice = index1.get_or_create("surname", "Smith", {"name": "Alice Smith"})
self.assertIsNotNone(alice)
self.assertTrue(isinstance(alice, neo4j.Node))
self.assertEqual("Alice Smith", alice["name"])
alice_id = alice.id
for i in range(10):
alice = index1.get_or_create("surname", "Smith", {"name": "Alice Smith"})
self.assertIsNotNone(alice)
self.assertTrue(isinstance(alice, neo4j.Node))
self.assertEqual("Alice Smith", alice["name"])
self.assertEqual(alice_id, alice.id)
def test_add_node_if_none(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("surname", "Smith")
alice, bob = self.graph_db.create(
{"name": "Alice Smith"}, {"name": "Bob Smith"}
)
index1.add_if_none("surname", "Smith", alice)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
index1.add_if_none("surname", "Smith", bob)
entities = index1.get("surname", "Smith")
self.assertIsNotNone(entities)
self.assertTrue(isinstance(entities, list))
self.assertEqual(1, len(entities))
self.assertEqual(alice, entities[0])
def test_node_index_query(self):
index1 = self.graph_db.get_or_create_index(neo4j.Node, "index1")
index1.remove("colour", "red")
index1.remove("colour", "green")
index1.remove("colour", "blue")
red, green, blue = self.graph_db.create({}, {}, {})
index1.add("colour", "red", red)
index1.add("colour", "green", green)
index1.add("colour", "blue", blue)
colours_containing_R = index1.query("colour:*r*")
self.assertTrue(red in colours_containing_R)
self.assertTrue(green in colours_containing_R)
self.assertFalse(blue in colours_containing_R)
if __name__ == '__main__':
unittest.main()
| [
"nigel@nigelsmall.name"
] | nigel@nigelsmall.name |
cd4f12206ec91523ba27cb33a771f3673c839cd1 | cc129db64fc64d1cb9a99526583771c10e245deb | /tests/test_det_next_michigan_development_corporation.py | da9a98ab1e31ab67be68a83440ae713aa016e955 | [
"MIT"
] | permissive | avelosa/city-scrapers-det | a42df36b7d2e98f7be68ae17e22c03af7a20280c | 964b941b67fb5113cda5e2bebd2ba288ac1422d7 | refs/heads/main | 2023-02-02T01:19:07.396737 | 2020-09-29T16:52:11 | 2020-09-29T16:52:11 | 300,441,174 | 1 | 0 | MIT | 2020-10-01T22:30:23 | 2020-10-01T22:30:22 | null | UTF-8 | Python | false | false | 4,826 | py | from datetime import datetime
from os.path import dirname, join
import pytest
import scrapy
from city_scrapers_core.constants import BOARD, PASSED, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from scrapy.settings import Settings
from city_scrapers.spiders.det_next_michigan_development_corporation import (
DetNextMichiganDevelopmentCorporationSpider,
)
LOCATION = {
"name": "DEGC, Guardian Building",
"address": "500 Griswold St, Suite 2200, Detroit, MI 48226",
}
TITLE = "Board of Directors"
test_response = file_response(
join(dirname(__file__), "files", "det_next_michigan_development_corporation.html"),
url="http://www.degc.org/public-authorities/d-nmdc/",
)
freezer = freeze_time("2018-07-26")
spider = DetNextMichiganDevelopmentCorporationSpider()
spider.settings = Settings(values={"CITY_SCRAPERS_ARCHIVE": False})
freezer.start()
parsed_items = [item for item in spider._next_meetings(test_response)]
freezer.stop()
def test_initial_request_count():
freezer.start()
items = list(spider.parse(test_response))
freezer.stop()
assert len(items) == 3
urls = {r.url for r in items if isinstance(r, scrapy.Request)}
assert urls == {
"http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
"http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
}
# current meeting http://www.degc.org/public-authorities/ldfa/
def test_title():
assert parsed_items[0]["title"] == TITLE
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2018, 9, 11, 9)
def test_end():
assert parsed_items[0]["end"] is None
def test_id():
assert (
parsed_items[0]["id"]
== "det_next_michigan_development_corporation/201809110900/x/board_of_directors"
)
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_location():
assert parsed_items[0]["location"] == LOCATION
def test_sources():
assert parsed_items[0]["source"] == "http://www.degc.org/public-authorities/d-nmdc/"
def test_links():
assert parsed_items[0]["links"] == []
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
# previous meetings e.g.
# http://www.degc.org/public-authorities/ldfa/fy-2017-2018-meetings/
test_prev_response = file_response(
join(
dirname(__file__),
"files",
"det_next_michigan_development_corporation_prev.html",
),
url="http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings",
)
freezer.start()
parsed_prev_items = [item for item in spider._parse_prev_meetings(test_prev_response)]
parsed_prev_items = sorted(parsed_prev_items, key=lambda x: x["start"], reverse=True)
freezer.stop()
def test_prev_request_count():
freezer.start()
items = list(spider._prev_meetings(test_response))
freezer.stop()
urls = {r.url for r in items if isinstance(r, scrapy.Request)}
assert len(urls) == 2
assert urls == {
"http://www.degc.org/public-authorities/d-nmdc/fy-2017-2018-meetings/",
"http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings/",
}
def test_prev_meeting_count():
assert len(parsed_prev_items) == 1
def test_prev_title():
assert parsed_prev_items[0]["title"] == TITLE
def test_prev_description():
assert parsed_prev_items[0]["description"] == ""
def test_prev_start():
assert parsed_prev_items[0]["start"] == datetime(2017, 8, 8, 9)
def test_prev_end():
assert parsed_prev_items[0]["end"] is None
def test_prev_id():
assert (
parsed_prev_items[0]["id"]
== "det_next_michigan_development_corporation/201708080900/x/board_of_directors"
)
def test_prev_status():
assert parsed_prev_items[0]["status"] == PASSED
def test_prev_location():
assert parsed_prev_items[0]["location"] == LOCATION
def test_prev_source():
assert (
parsed_prev_items[0]["source"]
== "http://www.degc.org/public-authorities/d-nmdc/dnmdc-fy-2016-2017-meetings"
)
def test_prev_links():
assert parsed_prev_items[0]["links"] == [
{
"href": "http://www.degc.org/wp-content/uploads/2016-08-09-DNMDC-Special-Board-Meeting-Agenda-4-1.pdf", # noqa
"title": "D-NMDC Agenda",
},
]
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_all_day(item):
assert item["all_day"] is False
@pytest.mark.parametrize("item", parsed_prev_items)
def test_prev_classification(item):
assert item["classification"] == BOARD
| [
"pjsier@gmail.com"
] | pjsier@gmail.com |
4c54b23822c77598fc8746f24f4c1bf18cdad087 | d9fb6c246965cbf290186268298859ddb913ee6e | /190813/03_mod.py | 3a21a5da1950eb762f029d3aa591e49c9be98f49 | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import sys
sys.stdin = open('sample_input_03.txt', 'r')
N = int(input())
for i in range(1, N+1):
play = list(map(int, input().split()))
test_words = [[] for i in range(play[0])]
for j in range(play[0]):
test_words[j] = list(map(str, input()))
for m in range(play[0]):
for n in range(play[0]):
mo_list = test_words[m][n:play[0]:] | [
"91hongppie@gmail.com"
] | 91hongppie@gmail.com |
929d4cbe14e60aaf7683f78e7b8e87aa8cf4d89d | a2490d50c85bc8385cdda1e2eaf88f02951dc808 | /client/verta/verta/_protos/public/modeldb/metadata/MetadataService_pb2.py | f7435b6df5e8379dcdef0246f8e30430db6a2fe6 | [
"Apache-2.0"
] | permissive | Atharex/modeldb | 2e379bc87df054dc5c1a9058620aef8a3ada9108 | 3a286d5861c1dd14342084793dd7d7584ff8a29b | refs/heads/master | 2022-11-08T09:23:37.799241 | 2020-07-01T12:16:31 | 2020-07-01T12:16:31 | 275,455,778 | 0 | 0 | Apache-2.0 | 2020-06-27T21:26:05 | 2020-06-27T21:26:05 | null | UTF-8 | Python | false | true | 16,824 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modeldb/metadata/MetadataService.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modeldb/metadata/MetadataService.proto',
package='ai.verta.modeldb.metadata',
syntax='proto3',
serialized_options=b'P\001ZGgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/metadata',
serialized_pb=b'\n&modeldb/metadata/MetadataService.proto\x12\x19\x61i.verta.modeldb.metadata\x1a\x1cgoogle/api/annotations.proto\"U\n\nIDTypeEnum\"G\n\x06IDType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x19\n\x15VERSIONING_REPOSITORY\x10\x01\x12\x15\n\x11VERSIONING_COMMIT\x10\x02\"\x80\x01\n\x12IdentificationType\x12=\n\x07id_type\x18\x01 \x01(\x0e\x32,.ai.verta.modeldb.metadata.IDTypeEnum.IDType\x12\x10\n\x06int_id\x18\x02 \x01(\x04H\x00\x12\x13\n\tstring_id\x18\x03 \x01(\tH\x00\x42\x04\n\x02id\"i\n\x10GetLabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x1a\x1a\n\x08Response\x12\x0e\n\x06labels\x18\x01 \x03(\t\"y\n\x10\x41\x64\x64LabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x12\x0e\n\x06labels\x18\x02 \x03(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\"|\n\x13\x44\x65leteLabelsRequest\x12\x39\n\x02id\x18\x01 \x01(\x0b\x32-.ai.verta.modeldb.metadata.IdentificationType\x12\x0e\n\x06labels\x18\x02 \x03(\t\x1a\x1a\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x08\x32\xca\x03\n\x0fMetadataService\x12\x8b\x01\n\tGetLabels\x12+.ai.verta.modeldb.metadata.GetLabelsRequest\x1a\x34.ai.verta.modeldb.metadata.GetLabelsRequest.Response\"\x1b\x82\xd3\xe4\x93\x02\x15\x12\x13/v1/metadata/labels\x12\x8e\x01\n\tAddLabels\x12+.ai.verta.modeldb.metadata.AddLabelsRequest\x1a\x34.ai.verta.modeldb.metadata.AddLabelsRequest.Response\"\x1e\x82\xd3\xe4\x93\x02\x18\x1a\x13/v1/metadata/labels:\x01*\x12\x97\x01\n\x0c\x44\x65leteLabels\x12..ai.verta.modeldb.metadata.DeleteLabelsRequest\x1a\x37.ai.verta.modeldb.metadata.DeleteLabelsRequest.Response\"\x1e\x82\xd3\xe4\x93\x02\x18*\x13/v1/metadata/labels:\x01*BKP\x01ZGgithub.com/VertaAI/modeldb/protos/gen/go/protos/public/modeldb/metadatab\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_IDTYPEENUM_IDTYPE = _descriptor.EnumDescriptor(
name='IDType',
full_name='ai.verta.modeldb.metadata.IDTypeEnum.IDType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERSIONING_REPOSITORY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERSIONING_COMMIT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=113,
serialized_end=184,
)
_sym_db.RegisterEnumDescriptor(_IDTYPEENUM_IDTYPE)
_IDTYPEENUM = _descriptor.Descriptor(
name='IDTypeEnum',
full_name='ai.verta.modeldb.metadata.IDTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_IDTYPEENUM_IDTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=184,
)
_IDENTIFICATIONTYPE = _descriptor.Descriptor(
name='IdentificationType',
full_name='ai.verta.modeldb.metadata.IdentificationType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_type', full_name='ai.verta.modeldb.metadata.IdentificationType.id_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int_id', full_name='ai.verta.modeldb.metadata.IdentificationType.int_id', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string_id', full_name='ai.verta.modeldb.metadata.IdentificationType.string_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.IdentificationType.id',
index=0, containing_type=None, fields=[]),
],
serialized_start=187,
serialized_end=315,
)
_GETLABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.GetLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.GetLabelsRequest.Response.labels', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=396,
serialized_end=422,
)
_GETLABELSREQUEST = _descriptor.Descriptor(
name='GetLabelsRequest',
full_name='ai.verta.modeldb.metadata.GetLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.GetLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETLABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=317,
serialized_end=422,
)
_ADDLABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.AddLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=545,
)
_ADDLABELSREQUEST = _descriptor.Descriptor(
name='AddLabelsRequest',
full_name='ai.verta.modeldb.metadata.AddLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.AddLabelsRequest.labels', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ADDLABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=424,
serialized_end=545,
)
_DELETELABELSREQUEST_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.Response.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=545,
)
_DELETELABELSREQUEST = _descriptor.Descriptor(
name='DeleteLabelsRequest',
full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.verta.modeldb.metadata.DeleteLabelsRequest.labels', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DELETELABELSREQUEST_RESPONSE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=671,
)
_IDTYPEENUM_IDTYPE.containing_type = _IDTYPEENUM
_IDENTIFICATIONTYPE.fields_by_name['id_type'].enum_type = _IDTYPEENUM_IDTYPE
_IDENTIFICATIONTYPE.oneofs_by_name['id'].fields.append(
_IDENTIFICATIONTYPE.fields_by_name['int_id'])
_IDENTIFICATIONTYPE.fields_by_name['int_id'].containing_oneof = _IDENTIFICATIONTYPE.oneofs_by_name['id']
_IDENTIFICATIONTYPE.oneofs_by_name['id'].fields.append(
_IDENTIFICATIONTYPE.fields_by_name['string_id'])
_IDENTIFICATIONTYPE.fields_by_name['string_id'].containing_oneof = _IDENTIFICATIONTYPE.oneofs_by_name['id']
_GETLABELSREQUEST_RESPONSE.containing_type = _GETLABELSREQUEST
_GETLABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
_ADDLABELSREQUEST_RESPONSE.containing_type = _ADDLABELSREQUEST
_ADDLABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
_DELETELABELSREQUEST_RESPONSE.containing_type = _DELETELABELSREQUEST
_DELETELABELSREQUEST.fields_by_name['id'].message_type = _IDENTIFICATIONTYPE
DESCRIPTOR.message_types_by_name['IDTypeEnum'] = _IDTYPEENUM
DESCRIPTOR.message_types_by_name['IdentificationType'] = _IDENTIFICATIONTYPE
DESCRIPTOR.message_types_by_name['GetLabelsRequest'] = _GETLABELSREQUEST
DESCRIPTOR.message_types_by_name['AddLabelsRequest'] = _ADDLABELSREQUEST
DESCRIPTOR.message_types_by_name['DeleteLabelsRequest'] = _DELETELABELSREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IDTypeEnum = _reflection.GeneratedProtocolMessageType('IDTypeEnum', (_message.Message,), {
'DESCRIPTOR' : _IDTYPEENUM,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.IDTypeEnum)
})
_sym_db.RegisterMessage(IDTypeEnum)
IdentificationType = _reflection.GeneratedProtocolMessageType('IdentificationType', (_message.Message,), {
'DESCRIPTOR' : _IDENTIFICATIONTYPE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.IdentificationType)
})
_sym_db.RegisterMessage(IdentificationType)
GetLabelsRequest = _reflection.GeneratedProtocolMessageType('GetLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _GETLABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.GetLabelsRequest.Response)
})
,
'DESCRIPTOR' : _GETLABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.GetLabelsRequest)
})
_sym_db.RegisterMessage(GetLabelsRequest)
_sym_db.RegisterMessage(GetLabelsRequest.Response)
AddLabelsRequest = _reflection.GeneratedProtocolMessageType('AddLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _ADDLABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.AddLabelsRequest.Response)
})
,
'DESCRIPTOR' : _ADDLABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.AddLabelsRequest)
})
_sym_db.RegisterMessage(AddLabelsRequest)
_sym_db.RegisterMessage(AddLabelsRequest.Response)
DeleteLabelsRequest = _reflection.GeneratedProtocolMessageType('DeleteLabelsRequest', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _DELETELABELSREQUEST_RESPONSE,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.DeleteLabelsRequest.Response)
})
,
'DESCRIPTOR' : _DELETELABELSREQUEST,
'__module__' : 'modeldb.metadata.MetadataService_pb2'
# @@protoc_insertion_point(class_scope:ai.verta.modeldb.metadata.DeleteLabelsRequest)
})
_sym_db.RegisterMessage(DeleteLabelsRequest)
_sym_db.RegisterMessage(DeleteLabelsRequest.Response)
DESCRIPTOR._options = None
_METADATASERVICE = _descriptor.ServiceDescriptor(
name='MetadataService',
full_name='ai.verta.modeldb.metadata.MetadataService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=674,
serialized_end=1132,
methods=[
_descriptor.MethodDescriptor(
name='GetLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.GetLabels',
index=0,
containing_service=None,
input_type=_GETLABELSREQUEST,
output_type=_GETLABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\025\022\023/v1/metadata/labels',
),
_descriptor.MethodDescriptor(
name='AddLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.AddLabels',
index=1,
containing_service=None,
input_type=_ADDLABELSREQUEST,
output_type=_ADDLABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\030\032\023/v1/metadata/labels:\001*',
),
_descriptor.MethodDescriptor(
name='DeleteLabels',
full_name='ai.verta.modeldb.metadata.MetadataService.DeleteLabels',
index=2,
containing_service=None,
input_type=_DELETELABELSREQUEST,
output_type=_DELETELABELSREQUEST_RESPONSE,
serialized_options=b'\202\323\344\223\002\030*\023/v1/metadata/labels:\001*',
),
])
_sym_db.RegisterServiceDescriptor(_METADATASERVICE)
DESCRIPTOR.services_by_name['MetadataService'] = _METADATASERVICE
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | Atharex.noreply@github.com |
91503fa1a7ffe5118597d43b74f8c1563b6bdca6 | b4c164c9c6f91badb305bae23246ab0c5ba5fcbe | /Problem Set 3/Motion.py | 3d2b43cec29e675115eb800b8faf2a7e42d0d3c2 | [] | no_license | KhrulSergey/AI_Robotics_Udacity | e2b25a5b9d752b2daaa5195b7b487738aae83231 | ff41e877f2af87348de8a0d44bc8f51ea29523f8 | refs/heads/master | 2021-04-26T22:31:06.456066 | 2019-03-30T20:33:49 | 2019-03-30T20:34:20 | 124,104,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,159 | py | # -----------------
# USER INSTRUCTIONS
#
# Write a function in the class robot called move()
#
# that takes self and a motion vector (this
# motion vector contains a steering* angle and a
# distance) as input and returns an instance of the class
# robot with the appropriate x, y, and orientation
# for the given motion.
#
# *steering is defined in the video
# which accompanies this problem.
#
# For now, please do NOT add noise to your move function.
#
# Please do not modify anything except where indicated
# below.
#
# There are test cases which you are free to use at the
# bottom. If you uncomment them for testing, make sure you
# re-comment them before you submit.
from math import *
import random
# --------
#
# The "world" has 4 landmarks at the corners of a 100 x 100 square;
# the robot's initial coordinates are somewhere inside that square.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks, (y, x) pairs
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
max_steering_angle = pi / 4 # You don't need to use this value, but it is good to keep in mind the limitations of a real car.
# ------------------------------------------------
#
# this is the robot class
#
class robot:
    """Bicycle-model robot with pose (x, y, orientation) in a 2D world.

    `length` is the wheelbase used by move(); the three noise parameters
    default to zero and can be changed with set_noise().
    """
    # --------
    # init:
    # creates robot and initializes location/orientation
    #
    def __init__(self, length=10.0):
        self.x = random.random() * world_size # initial x position
        self.y = random.random() * world_size # initial y position
        self.orientation = random.random() * 2.0 * pi # initial orientation
        self.length = length # length of robot
        self.bearing_noise = 0.0 # initialize bearing noise to zero
        self.steering_noise = 0.0 # initialize steering noise to zero
        self.distance_noise = 0.0 # initialize distance noise to zero
    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
    # --------
    # set:
    # sets a robot coordinate
    #
    def set(self, new_x, new_y, new_orientation):
        """Set the robot pose; orientation must lie in [0, 2*pi)."""
        if new_orientation < 0 or new_orientation >= 2 * pi:
            # BUGFIX: `raise (ValueError, msg)` raises a TypeError on Python 3;
            # instantiate the exception instead.
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)
    # --------
    # set_noise:
    # sets the noise parameters
    #
    def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.bearing_noise = float(new_b_noise)
        self.steering_noise = float(new_s_noise)
        self.distance_noise = float(new_d_noise)
    ############# ONLY ADD/MODIFY CODE BELOW HERE ###################
    # --------
    # move:
    # move along a section of a circular path according to motion
    # motion[0] - angle of steering
    # motion[1] - move_distance = x
    #
    def move(self, motion): # Do not change the name of this function
        """Return a NEW robot advanced along motion = [steering_angle, distance]."""
        stearing_angle = motion[0]
        distance = motion[1]
        if abs(stearing_angle) > max_steering_angle:
            raise ValueError('Exceed max steering angle')  # BUGFIX: was a raised tuple
        if distance < 0:
            raise ValueError('Moving backwards is not permited')  # BUGFIX: was a raised tuple
        epsilon = 0.001
        result = robot()
        result.set_noise(self.bearing_noise, self.steering_noise, self.distance_noise)
        result.length = self.length
        # apply noise to future (gauss with sigma=0 returns the mean unchanged)
        stearing_angle2 = random.gauss(stearing_angle, self.steering_noise)
        dist2 = random.gauss(distance, self.distance_noise)
        turn_angle = dist2/result.length * tan(stearing_angle2)
        if(abs(turn_angle) < epsilon):
            # approximate by straight line motion
            result.x = self.x + dist2 * cos(self.orientation)
            result.y = self.y + dist2 * sin(self.orientation)
            result.orientation = (self.orientation + turn_angle)%(2.0*pi)
        else:
            # approximate bycicle model for motion: rotate around the turning
            # circle of radius R centred at (cx, cy)
            R = dist2/turn_angle
            cx = self.x - sin(self.orientation)*R
            cy = self.y + cos(self.orientation)*R
            result.orientation = (self.orientation + turn_angle) % (2.0 * pi)
            result.x = cx + (sin(result.orientation)* R)
            result.y = cy - (cos(result.orientation)* R)
        return result # make sure your move function returns an instance
                      # of the robot class with the correct coordinates.
############## ONLY ADD/MODIFY CODE ABOVE HERE ####################
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## --------
## TEST CASE:
##
## 1) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=10.0 y=0.0 orient=0.0]
## Robot: [x=19.861 y=1.4333 orient=0.2886]
## Robot: [x=39.034 y=7.1270 orient=0.2886]
##
##
# length = 20.
# bearing_noise = 0.0
# steering_noise = 0.0
# distance_noise = 0.0
#
# myrobot = robot(length)
# myrobot.set(0.0, 0.0, 0.0)
# myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
#
# motions = [[0.0, 10.0], [pi / 6.0, 10], [0.0, 20.0]]
# T = len(motions)
#
# print ('Robot: ', myrobot)
# for t in range(T):
# myrobot = myrobot.move(motions[t])
# print ('Robot: ', myrobot)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## 2) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=9.9828 y=0.5063 orient=0.1013]
## Robot: [x=19.863 y=2.0201 orient=0.2027]
## Robot: [x=29.539 y=4.5259 orient=0.3040]
## Robot: [x=38.913 y=7.9979 orient=0.4054]
## Robot: [x=47.887 y=12.400 orient=0.5067]
## Robot: [x=56.369 y=17.688 orient=0.6081]
## Robot: [x=64.273 y=23.807 orient=0.7094]
## Robot: [x=71.517 y=30.695 orient=0.8108]
## Robot: [x=78.027 y=38.280 orient=0.9121]
## Robot: [x=83.736 y=46.485 orient=1.0135]
##
# Demo for test case 2: a 20-unit robot driving ten 10-unit arcs with a
# constant 0.2 rad steering angle and no noise.
length = 20.
bearing_noise = 0.0
steering_noise = 0.0
distance_noise = 0.0
myrobot = robot(length)
myrobot.set(0.0, 0.0, 0.0)
myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
# Ten identical [steering, distance] commands.
motions = [[0.2, 10.] for row in range(10)]
T = len(motions)
print ('Robot: ', myrobot)
for t in range(T):
    myrobot = myrobot.move(motions[t])
    print ('Robot: ', myrobot)
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
| [
"siberianodis@gmail.com"
] | siberianodis@gmail.com |
966163ac218a00e186e1835d56634756e84143fb | cf72eced416ae5fee75e194b4da7728a00520c54 | /Chapter12SpreadSheetCellInverter.py | 1c1fd68ab2760d6d47d1bc1f4deba4c7b1928ea9 | [] | no_license | spencercorwin/automate-the-boring-stuff-answers | ed08080ec3c38a5cc84510e13995cb3cb95d5809 | e564658ae702672670e17c2989a58d75b9110d32 | refs/heads/master | 2021-09-19T02:50:39.541981 | 2018-07-22T22:09:13 | 2018-07-22T22:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #! usr/bin/env python3
#Chapter 12 Challenge - Spreadsheet Cell Inverter
#This program inverts the row and column of cells in a spreadsheet
import os, openpyxl, pprint
os.chdir('/Users/spencercorwin/Desktop')
workbook = openpyxl.load_workbook('testFile.xlsx')
source = workbook.active
inverted = workbook.create_sheet(index=2, title='resultSheet')
# Read the whole active sheet into a row-major grid.
sheetData = [[source.cell(row=r + 1, column=c + 1).value
              for c in range(source.max_column)]
             for r in range(source.max_row)]
# Write the grid back transposed: cell (r, c) lands at (c, r).
for r in range(source.max_row):
    for c in range(source.max_column):
        inverted.cell(row=c + 1, column=r + 1).value = sheetData[r][c]
workbook.save('myTestResult.xlsx')
| [
"noreply@github.com"
] | spencercorwin.noreply@github.com |
4ac613dc87fb37cf7e557918be4a06f39615bb70 | c3b5e412fd9479c046c09c251dd0688a4fec6130 | /solver.py | f360ff7d89db5f41f308259d807e7c2f16ec6ff5 | [] | no_license | Pazaak/NaoPickUp | edb45f8d4dbf392d0a4737c3d9e91b2dfd00b2a6 | 417a20e344fa57272a1481810571287a18c864a4 | refs/heads/master | 2020-05-20T00:48:28.326318 | 2015-07-08T08:21:03 | 2015-07-08T08:21:03 | 36,508,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,804 | py | __author__ = 'Luis Fabregues de los Santos'
import heapq as heap
import copy
import math
infty = float('inf')
def distance_2d(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx * dx + dy * dy)
def value(list, robots, target):
    """Total walking distance for an assignment of boxes to robots.

    `list[i]` is the ordered list of boxes assigned to `robots[i]`.  The
    first box costs robot->box + box->target; every later box is fetched
    starting from the target, so it costs 2 * (box->target).

    The first parameter keeps its original name `list` (shadowing the
    builtin) to preserve the call signature.
    """
    result = 0
    for path in range(len(list)):  # range(): Python 3 compatible (was xrange)
        for i in range(len(list[path])):
            if i == 0:
                result += distance_2d(robots[path].x, robots[path].y, list[path][i].x, list[path][i].y)
                result += distance_2d(target.x, target.y, list[path][i].x, list[path][i].y)
            else:
                result += 2 * distance_2d(target.x, target.y, list[path][i].x, list[path][i].y)
    return result
# Evaluation function 1, min total steps
def branchAndBound1(robots, target, boxes):
    """Branch & bound over box-to-robot assignments minimising TOTAL distance.

    Returns ([maxLen, insertions, creations, extractions, iterations,
    total_distance, 0], assignment) where assignment[i] is the ordered list
    of boxes robot i fetches.
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    # Consider far-away boxes first.
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []  # min-heap of (score, remaining box indices, partial assignment)
    scores = []
    for box in boxes:
        # Optimistic bound: an unassigned box costs at least one round trip.
        scores.append(distance_2d(box.x, box.y, target.x, target.y) * 2)
    current = []
    for i in range(len(robots)):  # range(): Python 3 compatible (was xrange)
        current.append([])
    # Push a real list: a Python 3 `range` object would break heap tie comparisons.
    heap.heappush(pool, (sum(scores), list(range(len(boxes))), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # No boxes left: this node is a complete (and cheapest) solution.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), 0], \
                   current[2]
        else:
            # Try handing each remaining box to each robot.
            for nbox in current[1]:
                for i in range(len(robots)):  # was xrange
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # New list of still-unassigned boxes.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    plusScore = 0
                    for scbox in newboxes:
                        plusScore += scores[scbox]
                    # Real cost of the partial assignment + optimistic remainder.
                    temp = (value(temp, robots, target) + plusScore, newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
def value2(list, robots, target):
    """Per-robot walking distances; returns the MAXIMUM over robots (makespan).

    Same cost model as value(); raises ValueError on an empty assignment
    list (max of an empty sequence), exactly like the original.
    """
    result = []
    for path in range(len(list)):  # range(): Python 3 compatible (was xrange)
        result.append(0)
        for i in range(len(list[path])):
            if i == 0:
                result[path] += distance_2d(robots[path].x, robots[path].y, list[path][i].x, list[path][i].y)
                result[path] += distance_2d(target.x, target.y, list[path][i].x, list[path][i].y)
            else:
                result[path] += 2 * distance_2d(target.x, target.y, list[path][i].x, list[path][i].y)
    return max(result)
# Evaluation function 2, min max robot steps
def branchAndBound2(robots, target, boxes):
    """Branch & bound minimising the MAXIMUM per-robot distance (makespan).

    Returns ([maxLen, insertions, creations, extractions, iterations,
    total_distance, makespan], assignment).
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []  # min-heap of (makespan score, remaining box indices, partial assignment)
    scores = []
    for box in boxes:
        scores.append((distance_2d(box.x, box.y, target.x, target.y) * 2))
    current = []
    for i in range(len(robots)):  # range(): Python 3 compatible (was xrange)
        current.append([])
    # Push a real list: a Python 3 `range` object would break heap tie comparisons.
    heap.heappush(pool, (sum(scores), list(range(len(boxes))), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # No boxes left: complete solution.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], current[2]
        else:
            # Try handing each remaining box to each robot.
            for nbox in current[1]:
                for i in range(len(robots)):  # was xrange
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # New list of still-unassigned boxes.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    temp = (value2(temp, robots, target), newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
# Evaluation function 2, with optimistic score, UNSTABLE
def branchAndBound3(robots, target, boxes):
    """Makespan branch & bound with a weighted score (marked UNSTABLE upstream).

    Score = 2 * makespan + number of boxes still unassigned; this biases the
    search toward deeper (more complete) nodes.
    """
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []  # min-heap of (weighted score, remaining box indices, partial assignment)
    scores = []
    for box in boxes:
        scores.append(distance_2d(box.x, box.y, target.x, target.y) * 2)
    current = []
    for i in range(len(robots)):  # range(): Python 3 compatible (was xrange)
        current.append([])
    # Push a real list: a Python 3 `range` object would break heap tie comparisons.
    heap.heappush(pool, (sum(scores), list(range(len(boxes))), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # No boxes left: complete solution.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], current[2]
        else:
            # Try handing each remaining box to each robot.
            for nbox in current[1]:
                for i in range(len(robots)):  # was xrange
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # New list of still-unassigned boxes.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    temp = (2*value2(temp, robots, target)+len(newboxes), newboxes, temp)
                    heap.heappush(pool, temp)
                    insertions += 1
        iteraciones += 1
# Evaluation function 2, save the better, complete state
def branchAndBound4(robots, target, boxes):
    """Makespan branch & bound that also prunes against the best complete
    solution found so far (`bestYet`)."""
    maxLen = 2
    insertions = 2
    creations = 2
    extractions = 0
    bestYet = infty  # makespan of the best COMPLETE solution seen so far
    boxes = sorted(boxes, key=lambda x: distance_2d(x.x, x.y, target.x, target.y), reverse=True)
    pool = []  # min-heap of (makespan score, remaining box indices, partial assignment)
    scores = []
    for box in boxes:
        scores.append((distance_2d(box.x, box.y, target.x, target.y) * 2))
    current = []
    for i in range(len(robots)):  # range(): Python 3 compatible (was xrange)
        current.append([])
    # Push a real list: a Python 3 `range` object would break heap tie comparisons.
    heap.heappush(pool, (sum(scores), list(range(len(boxes))), current))
    iteraciones = 0
    while len(pool) > 0:
        if len(pool) > maxLen: maxLen = len(pool)
        extractions += 1
        current = heap.heappop(pool)
        # No boxes left: complete solution.
        if not current[1]:
            return [maxLen, insertions, creations, extractions, iteraciones, value(current[2], robots, target), \
                    value2(current[2], robots, target)], \
                   current[2]
        else:
            # Try handing each remaining box to each robot.
            for nbox in current[1]:
                for i in range(len(robots)):  # was xrange
                    temp = copy.deepcopy(current[2])
                    temp[i].append(boxes[nbox])
                    # New list of still-unassigned boxes.
                    newboxes = list(current[1])
                    newboxes.remove(nbox)
                    creations += 1
                    temp = (value2(temp, robots, target), newboxes, temp)
                    # Prune nodes that can no longer beat the best full solution.
                    if temp[0] < bestYet:
                        heap.heappush(pool, temp)
                        insertions += 1
                        if not temp[1]:
                            bestYet = temp[0]
        iteraciones += 1
"aggalem@gmail.com"
] | aggalem@gmail.com |
bd1236dee44cc218e34f71aa057ce6aeaae640d8 | 4f365fbdfd4701c3a294dfba17c1377d4eb369d8 | /jinja2htmlcompress.py | 507c7509a9a3a8418fcb4ce187fb21809e76fc26 | [
"BSD-3-Clause"
] | permissive | Orvillar/jinja2-htmlcompress | 4e725f9b6ceb6f327d4247d7dab6f55d344039ea | b34dc409762aaf205ccd59e37ad4b3dc5331904d | refs/heads/master | 2020-04-07T16:06:54.607802 | 2018-11-21T08:31:21 | 2018-11-21T08:31:21 | 158,515,466 | 0 | 0 | NOASSERTION | 2018-11-21T08:29:20 | 2018-11-21T08:29:19 | null | UTF-8 | Python | false | false | 6,354 | py | # -*- coding: utf-8 -*-
"""
jinja2htmlcompress
~~~~~~~~~~~~~~~~~~
A Jinja2 extension that eliminates useless whitespace at template
compilation time without extra overhead.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.ext import Extension
from jinja2.lexer import Token, describe_token
from jinja2 import TemplateSyntaxError
_tag_re = re.compile(r'(?:<(/?)([a-zA-Z0-9_-]+)\s*|(>\s*))(?s)')
_ws_normalize_re = re.compile(r'[ \t\r\n]+')
class StreamProcessContext(object):
    """Mutable state shared while filtering a single token stream."""

    def __init__(self, stream):
        self.stream = stream  # the jinja2 token stream being filtered
        self.token = None     # token currently being processed
        self.stack = []       # open HTML tag names, innermost last

    def fail(self, message):
        """Raise a template syntax error located at the current token."""
        token = self.token
        stream = self.stream
        raise TemplateSyntaxError(message, token.lineno, stream.name, stream.filename)
def _make_dict_from_listing(listing):
rv = {}
for keys, value in listing:
for key in keys:
rv[key] = value
return rv
class HTMLCompress(Extension):
    """Jinja2 extension that strips insignificant whitespace from HTML at
    template compile time by tracking the open-tag stack."""

    # Contents of these elements must never be whitespace-normalized.
    isolated_elements = set(['script', 'style', 'noscript', 'textarea'])
    # Elements that never take a closing tag.
    void_elements = set(['br', 'img', 'area', 'hr', 'param', 'input',
                         'embed', 'col'])
    block_elements = set(['div', 'p', 'form', 'ul', 'ol', 'li', 'table', 'tr',
                          'tbody', 'thead', 'tfoot', 'tr', 'td', 'th', 'dl',
                          'dt', 'dd', 'blockquote', 'h1', 'h2', 'h3', 'h4',
                          'h5', 'h6', 'pre'])
    # tag -> set of tags it implicitly closes ('#block' = any block element).
    breaking_rules = _make_dict_from_listing([
        (['p'], set(['#block'])),
        (['li'], set(['li'])),
        (['td', 'th'], set(['td', 'th', 'tr', 'tbody', 'thead', 'tfoot'])),
        (['tr'], set(['tr', 'tbody', 'thead', 'tfoot'])),
        (['thead', 'tbody', 'tfoot'], set(['thead', 'tbody', 'tfoot'])),
        (['dd', 'dt'], set(['dl', 'dt', 'dd']))
    ])

    def is_isolated(self, stack):
        """True if any open tag forbids whitespace normalization."""
        for tag in reversed(stack):
            if tag in self.isolated_elements:
                return True
        return False

    def is_breaking(self, tag, other_tag):
        """True if opening `tag` implicitly closes the open `other_tag`."""
        breaking = self.breaking_rules.get(other_tag)
        return breaking and (tag in breaking or
            ('#block' in breaking and tag in self.block_elements))

    def enter_tag(self, tag, ctx):
        """Push `tag`, first auto-closing any tags it implicitly breaks."""
        while ctx.stack and self.is_breaking(tag, ctx.stack[-1]):
            self.leave_tag(ctx.stack[-1], ctx)
        if tag not in self.void_elements:
            ctx.stack.append(tag)

    def leave_tag(self, tag, ctx):
        """Pop tags until `tag` is closed; fail if it was never open."""
        if not ctx.stack:
            ctx.fail('Tried to leave "%s" but something closed '
                     'it already' % tag)
        if tag == ctx.stack[-1]:
            ctx.stack.pop()
            return
        # NOTE(review): this iterates a reversed() view of a list it mutates,
        # exactly as upstream does -- behavior kept as-is.
        for idx, other_tag in enumerate(reversed(ctx.stack)):
            if other_tag == tag:
                for num in range(idx + 1):  # BUGFIX: xrange is Python 2 only
                    ctx.stack.pop()
            elif not self.breaking_rules.get(other_tag):
                break

    def normalize(self, ctx):
        """Return the current data token's value with whitespace collapsed
        outside isolated elements."""
        pos = 0
        buffer = []
        def write_data(value):
            if not self.is_isolated(ctx.stack):
                value = _ws_normalize_re.sub(' ', value.strip())
            buffer.append(value)
        for match in _tag_re.finditer(ctx.token.value):
            closes, tag, sole = match.groups()
            preamble = ctx.token.value[pos:match.start()]
            write_data(preamble)
            if sole:
                # Bare '>' run: treat as data.
                write_data(sole)
            else:
                buffer.append(match.group())
                # Closing tags pop the stack, opening tags push onto it.
                (closes and self.leave_tag or self.enter_tag)(tag, ctx)
            pos = match.end()
        write_data(ctx.token.value[pos:])
        return u''.join(buffer)

    def filter_stream(self, stream):
        """Rewrite every 'data' token in the stream with normalized text."""
        ctx = StreamProcessContext(stream)
        for token in stream:
            if token.type != 'data':
                yield token
                continue
            ctx.token = token
            value = self.normalize(ctx)
            yield Token(token.lineno, 'data', value)
class SelectiveHTMLCompress(HTMLCompress):
    """Like HTMLCompress, but only compresses whitespace between custom
    {% strip %} ... {% endstrip %} tags (which may nest)."""

    def filter_stream(self, stream):
        ctx = StreamProcessContext(stream)
        strip_depth = 0  # current nesting depth of {% strip %} regions
        while 1:
            if stream.current.type == 'block_begin':
                # Consume {% strip %} / {% endstrip %} markers entirely.
                if stream.look().test('name:strip') or \
                   stream.look().test('name:endstrip'):
                    stream.skip()
                    if stream.current.value == 'strip':
                        strip_depth += 1
                    else:
                        strip_depth -= 1
                        if strip_depth < 0:
                            ctx.fail('Unexpected tag endstrip')
                    stream.skip()
                    if stream.current.type != 'block_end':
                        ctx.fail('expected end of block, got %s' %
                                 describe_token(stream.current))
                    stream.skip()
            if strip_depth > 0 and stream.current.type == 'data':
                ctx.token = stream.current
                value = self.normalize(ctx)
                yield Token(stream.current.lineno, 'data', value)
            else:
                yield stream.current
            # BUGFIX: stream.next() was Python-2-only, and letting
            # StopIteration escape a generator is an error since PEP 479.
            try:
                next(stream)
            except StopIteration:
                break
def test():
    """Demo both extensions by rendering two small templates to stdout."""
    from jinja2 import Environment
    env = Environment(extensions=[HTMLCompress])
    tmpl = env.from_string('''
    <html>
      <head>
        <title>{{ title }}</title>
      </head>
      <script type=text/javascript>
        if (foo < 42) {
          document.write('Foo < Bar');
        }
      </script>
      <body>
        <li><a href="{{ href }}">{{ title }}</a><br>Test Foo
        <li><a href="{{ href }}">{{ title }}</a><img src=test.png>
      </body>
    </html>
    ''')
    # BUGFIX: the bare Python-2 print statements below were syntax errors on
    # Python 3; print() calls work on both.
    print(tmpl.render(title=42, href='index.html'))
    env = Environment(extensions=[SelectiveHTMLCompress])
    tmpl = env.from_string('''
    Normal <span> unchanged </span> stuff
    {% strip %}Stripped <span class=foo > test </span>
    <a href="foo"> test </a> {{ foo }}
    Normal <stuff> again {{ foo }} </stuff>
    <p>
    Foo<br>Bar
    Baz
    <p>
    Moep <span>Test</span> Moep
    </p>
    {% endstrip %}
    ''')
    print(tmpl.render(foo=42))
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    test()
| [
"armin.ronacher@active-4.com"
] | armin.ronacher@active-4.com |
99b49fca33ce2929cfd1a125527e1ee432ccfad4 | 42d58ba3005263744a04e6eb6e5a7e550b4eef29 | /Day2_Tip_Calculator.py | b63dca6d2e892a1ae8ef34186a247a15cec8473e | [] | no_license | ShaneNelsonCodes/100Days_Python | 8828ccfba0873084316f46bc98a53c603de452ae | 4cea2ba5fe0459a4676c7d164b5402dd244ff11b | refs/heads/main | 2023-02-12T14:01:10.726683 | 2021-01-12T13:59:04 | 2021-01-12T13:59:04 | 317,283,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | #If the bill was $150.00, split between 5 people, with 12% tip.
#Each person should pay (150.00 / 5) * 1.12 = 33.6
#Format the result to 2 decimal places = 33.60
#Tip: You might need to do some research in Google to figure out how to do this.
print("Welcome to the tip calculator\n")
# Collect the bill details from the user.
bill = float(input("What was the total bill?\n$"))
tip = int(input("What percentage would you like to give? 10, 12, or 15\n%")) / 100
split = int(input("How many people to split the bill?\n"))
# Per-person share of the tipped total, rounded to cents.
total_with_tip = bill * (1 + tip)
amount = round(total_with_tip / split, 2)
print(f"Each person should pay: ${amount}")
| [
"Shane.Nelson@kp.org"
] | Shane.Nelson@kp.org |
c13f2224b2a218046e61fc2c3ce17a06270f3028 | d6a030dfacb63fcb7ede3b1ed97dd723e5aab478 | /plotfio.py | fad2a7454bd7ff152db230d88500db0c26b49071 | [] | no_license | ahlfors/FIO-scripts | 0adabf2ffc164de67b7757aeb7ed6a4eb999b711 | b8fcb4998d3516606da965f421a6f6f3c8957dc2 | refs/heads/master | 2023-03-16T14:53:40.037181 | 2019-03-18T19:32:32 | 2019-03-18T19:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | #!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import csv, argparse
matplotlib.use('Agg')
# CLI: one curve per input CSV file, with matching labels (and optional markers).
parser = argparse.ArgumentParser()
parser.add_argument('-f', dest="files", type=str, nargs='+', required=True, help='The out.txt files')
parser.add_argument('-l', dest="labels", type=str, nargs='+', required=True, help='Label for each curve')
parser.add_argument('-m', dest="markers", type=str, nargs='+', required=False, help='Marker for each curve')
parser.add_argument('-s', dest="scale", type=str, required=False, help='Scale of y-axis')
parser.add_argument('-x', dest="xlabel", type=str, required=True, help='Label of x-axis')
parser.add_argument('-y', dest="ylabel", type=str, required=True, help='Label of y-axis')
parser.add_argument('-o', dest="outputfolder", required=True, help="Ouput folder")
parser.add_argument('-n', dest="name", required=True, help="Name of output plot")
parser.add_argument('-t', dest="title", required=True, help="Title of output plot")
args = parser.parse_args()
# Optional target unit for the y column ("MB" or "GB"); None keeps raw values.
scale = None
if args.scale is not None:
    scale = args.scale
index = 0
for f in args.files:
    with open(f, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        header = next(reader, None)  # first row names the columns, incl. the unit
        x, y = [], []
        for row in reader:
            x.append(int(row[0]))
            # Convert the measured value into the requested unit, using the
            # unit embedded in the header of column 1.
            # NOTE(review): if the header names neither "MB" nor "KB", no y
            # value is appended and x/y lengths diverge -- confirm input format.
            if scale == "MB":
                if "MB" in header[1]:
                    y.append(float(row[1]))
                elif "KB" in header[1]:
                    y.append(float(row[1]) / float(1024))
            elif scale == "GB":
                if "MB" in header[1]:
                    y.append(float(row[1]) / float(1024))
                elif "KB" in header[1]:
                    y.append(float(row[1]) / float(1024*1024))
            else:
                y.append(float(row[1]))
        N = len(x)
        # Plot against evenly spaced positions; x values become tick labels.
        x2 = np.arange(N)
        if args.markers is not None:
            plt.plot(x2, y, marker=args.markers[index], markersize=7, fillstyle='none', label=args.labels[index])
        else:
            plt.plot(x2, y, label=args.labels[index])
        plt.xticks(x2, x, rotation=90)
        index += 1
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.title(args.title)
plt.legend()
plt.tight_layout()
#plt.show()
plt.savefig(args.outputfolder+"/"+args.name+".eps", format="eps")
| [
"batsarasnikos@gmail.com"
] | batsarasnikos@gmail.com |
4f9561b6707673dd54e154e177dcfd3d25aebf98 | 3d276b170ed2255e8cee3b05fca4c6f75ef5eaae | /Consective-evens/solutionB.py | 3028f13b47118968e7dfa7109a1f3575ac7ff28e | [] | no_license | xdatageek/math_and_physics | 0db5a9e86c54bc7355cb6b0c47189f5de5b097ba | 5f1fafce9995c3be6ee02a5f0f4d8c45358e234e | refs/heads/main | 2023-02-09T19:00:01.665490 | 2021-01-02T04:37:35 | 2021-01-02T04:37:35 | 324,129,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | f = open('input.txt', 'r')
next(f)
A = [int(i) for i in f.readline().split()]
for k in range(int(f.readline())):
c = f.readline().split()
c1 = int(c[1])
c2 = int(c[2])
if c[0] == '1':
S = 0
for j in range(c1, c2+1):
S += A[j]
print(S)
else:
A[c1] = c2
| [
"noreply@github.com"
] | xdatageek.noreply@github.com |
97cb339e8b6bfc2cd89e1ed0be47bb4f41d910d8 | 1feffdfcdc376ce007d64c911ebbe31826bf217e | /Core/CoreEngine.py | 9100c47f796a722de061abd7fb2c2871a97ad97e | [] | no_license | Zaladar/rpgmotor | 1c3bfae5c332351b0d69559575ebe5a8cd2fdc55 | dccc84f73008132be18a2a9c389d02d68b14b28d | refs/heads/master | 2020-12-30T11:38:56.071865 | 2017-10-20T12:32:47 | 2017-10-20T12:32:47 | 91,575,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,185 | py | #1 standard livraries
from random import *
#2 Third party
#3 Local
dices = {}
class Dice:
    """A die with a name, a number of sides, and a roll modifier."""

    def __init__(self, name, sides, mod=0):
        # `mod` now defaults to 0: several call sites (sides, rename,
        # DiceBase) construct Dice without a modifier and crashed before.
        self.name = name
        self.sides = int(sides)
        self.mod = int(mod)

    # a decorator applying `mod` to the roll could go here
    def rolling(self, t):
        """Roll this die t times and return the summed result."""
        print("rolling your dice!")
        res = 0
        i = 0
        while i < t:
            res += randint(1, self.sides)
            i += 1  # BUGFIX: without this the loop never terminated
        return res

    # what die exists
    def storeddie(self):
        """Print name/sides if this die is registered in the global `dices`."""
        if self.name in dices:
            print("name: " + str(self.name))
            print("sides: " + str(self.sides))

    # to create new dies if there is a need for it
    def DiceCreator(self):
        """Interactively gather the attributes for a new die.

        NOTE(review): the line that actually stored the new die is commented
        out, and `name` stays unbound on invalid input -- kept as in the
        original.
        """
        print(' =[Dice Creator]=')
        print('=[used to create dice]=')
        print(' ')
        sides = input('how many sides does your dice have?:')
        rn = input("want to name them? default name is d" + sides)
        if rn.lower() == "yes":
            name = input("name:")
        elif rn.lower() == "no":
            print("ok")
            name = rn
        else:
            print("invalid input")
        #dices[name] = Dice(name, sides)
        print("dices:" + ",".join([x for x in dices.keys()]))

    def sides(self, name):
        """Interactively replace the registered die `name` with a new side count."""
        sidesqq = int(input("what do you wish to set your sides to?"))
        if isinstance(sidesqq, int):
            dices[name] = Dice(name, sidesqq)
        else:
            print("incvalid input")
            bootupseq()

    def rename(self, name):
        """Interactively re-register the die `name` under a new name."""
        qq = input("This will change the name of the dice proceed?")
        if qq == "yes":
            nameqq = input(" what do you want to call these dice?")
            dices[name] = Dice(nameqq, self.sides)
        elif qq == "no":
            print("okay, rebootinng")
            bootupseq()
        else:
            print("invalid input")
            bootupseq()
def DiceBase():
    """Register the standard polyhedral dice (d2..d100) in the global `dices`."""
    pd = [2, 3, 4, 6, 8, 10, 12, 20, 100]
    for i in pd:
        dice = {
            "name": 'd' + str(i),
            "sides": i,
            "mod": 0,  # BUGFIX: Dice.__init__ required a modifier argument
        }
        dices[dice["name"]] = Dice(**dice)
    print("db done")
# en ny funktion spel kontrol ska skapas och där ska man kunna initiera spel boot up ska bara kunna kalla på spelinitiering karaktärs och tärnings förändringar och information.
def Gameinit(type=None):
    """Announce the chosen game system.

    `type` is optional for backward compatibility: no-argument callers get
    the original interactive prompt, while bootupseq() can pass the answer
    it already collected (the old zero-argument signature made that call a
    TypeError).
    """
    if type is None:
        type = input("what kind of game do you wish to play, pathfinder or dark heresy?")
    if type.lower() == "pathfinder":
        print("pathfinder is being setup!")
    elif type.lower() == "dark heresy":
        print("dark heresy is being setup!")
    else:
        print("invalid input,returning to boot up sequence!")
        bootupseq()
def bootupseq():
    """Main interactive menu loop; dispatches on the typed command."""
    while True:
        ans = input('what function do you want to start? type help for... help...:').lower()
        if ans == 'dice creator':
            # DiceCreator never touches `self`, so a placeholder works;
            # the old bare Dice.DiceCreator() was a TypeError.
            Dice.DiceCreator(None)
        elif ans == 'rolling':
            name = input("what dice do you wish to use?")
            if name in dices.keys():
                # BUGFIX: Dice.rolling(dices[name]) omitted the roll count.
                print(Dice.rolling(dices[name], 1))
            else:
                print("invalid input, dices doesn't exist! use dice creator")
        elif ans == "existing die":
            print("dices: " + ",".join([x for x in dices.keys()]))
            req = input("do you want to look at any of the dice? yes/no:")
            req = req.lower()
            if req == "yes":
                name = input("what dice?")
                if name.lower() in dices.keys():
                    # BUGFIX: the method is storeddie, not storedie.
                    # NOTE(review): lookup uses `name` but the check used
                    # name.lower() -- mixed-case input may still KeyError.
                    Dice.storeddie(dices[name])
                else:
                    print("not in dice")
        elif ans == "game init":
            # Gameinit() does its own prompting; the old code compared
            # strings with `|` (a TypeError) and then called Gameinit with
            # an argument it did not accept.
            Gameinit()
        elif ans == 'help':
            print('lol noob')
            print('functions available: Dice creator, Existing die, Game init and Rolling')
        elif ans == 'break':
            break
        else:
            print('invalid input')
            print("input:" + ans)
# Populate the default dice table at startup.
DiceBase()
bootupseq() | [
"oscarjwhaglund@gmail.com"
] | oscarjwhaglund@gmail.com |
f832144531d3e829e3b9637112237b07b3bc34c5 | 290e0f86fd9cd2881e82b44a308beb1b7f657fb7 | /assistant_utils.py | 577de88ddc415bc0298dcd7c491b48e2555c4adb | [] | no_license | essalj/ai_assistant | 0d2696a90a3a29e28461f95abec6fb8685298f1c | 5a592b86015a8e0fc6ea961eeddf1b885dbe900d | refs/heads/main | 2023-04-22T21:57:22.568114 | 2021-04-25T12:45:20 | 2021-04-25T12:45:20 | 325,210,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
import pyautogui
#screen_shot(file_name)
def screen_shot(file_name):
screenshot = pyautogui.screenshot()
file_name = file_name + ".png"
screenshot.save(file_name)
print("file saved here: " + file_name)
#screen_shot("test_fil")
| [
"noreply@github.com"
] | essalj.noreply@github.com |
e195f2cada6fd440c8ec551ec4c3ca40c8efe9b4 | f0af90e1a5e9cd73682a42f0295574d70450c62e | /test.py | a4640244851e505dad79503dd608447c9eebf3f1 | [] | no_license | k3ch-jo/mocha | 9c4899e1157ad547b60c5e86c6587b9f4e917899 | 4142da6dd12a866d3d82f0e311c023f00fed83a7 | refs/heads/main | 2022-12-23T08:37:37.497990 | 2020-10-05T07:38:03 | 2020-10-05T07:38:03 | 301,324,930 | 0 | 0 | null | 2020-10-05T07:38:04 | 2020-10-05T07:21:42 | Python | UTF-8 | Python | false | false | 44 | py | #says goodmorning
print("Good Morning")
| [
"noreply@github.com"
] | k3ch-jo.noreply@github.com |
e2072af249848ec0134b8e52255016c3d1ed8c07 | 6270ed787bfa3080975ce4f0a27a733b391f4f84 | /22Septembre.py | 1adf9ba336df6550b653abf92fdcb14a6d3712dd | [] | no_license | lucasdavid47/IPT_Sup | b6a8aec625e32903af1704f95f356205e6d43ba4 | 72ee4fa79c8b5f305cbe82d72d5161d6da32e86d | refs/heads/master | 2023-01-07T19:04:15.092690 | 2020-11-04T07:10:05 | 2020-11-04T07:10:05 | 294,027,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #Somme
A=[]
B=[]
n =len(A) #nombre de lignes de A
C=[]
for i in range(0, n):
L=[]
for j in range(0, n):
L.append(A[i][j]+B[i][j])
C.append(L)
for x in C:
print(x) #affichage ligne par ligne de C
#Produit
n = len(A)
D=[]
for i in range(0, n):
L=[]
for j in range(0, n):
S=0
for k in range(0,n):
S += A[i][k]*B[k][j]
L.append(S)
D.append(L)
for x in D:
print(x)
#Python
L=[1,2,3,4,5,6]
for k in range(0, len(L)):
if(L[k]>3):
L[k] *=2
else:
L[k] *=3
print[L] | [
"lucas.david44000@gmail.com"
] | lucas.david44000@gmail.com |
803d74d1d4c3dd4a3fb7412c178cd596d5ac7f41 | 2f3278709d2409a4a20b7f79d31dfed43aac6557 | /LiTS_Decide_Preprocessing/TumorNetAvgpoolRelu.py | 893eb9d11c26f76fac1a4be2dec161ac10e44d82 | [] | no_license | HaoW14/Preprocess-of-CT-data | 8678ff749c644a0881d8305d6c6b51d8bf87912a | 811d95f28c11abc2b4b0305806397602ab4d51ba | refs/heads/master | 2022-12-28T01:02:39.929089 | 2020-10-18T10:31:27 | 2020-10-18T10:31:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,773 | py | # TumorNet without source; Downsample by pooling; activation is relu
# Which can use transfer lerning directly
import torch
from torch import nn
import numpy as np
import SimpleITK as sitk
from medpy import metric
def saved_preprocessed(savedImg,origin,direction,xyz_thickness,saved_name):
    """Write the network output `savedImg` as a SimpleITK label volume.

    `savedImg` is a torch tensor (it is detached/moved to CPU); the argmax
    over dim 1 and the squeeze of dim 0 imply an expected shape of
    (1, num_classes, ...) -- TODO confirm against the caller.
    origin/direction/xyz_thickness arrive as sequences of 0-d tensors and
    are unwrapped to plain Python numbers for SimpleITK.
    """
    origin = tuple(k.item() for k in origin)
    direction = tuple(k.item() for k in direction)
    xyz_thickness = tuple(k.item() for k in xyz_thickness)
    # Per-voxel class labels, stored as float32.
    savedImg = np.squeeze(np.argmax(savedImg.detach().cpu().numpy(),axis=1),0).astype(np.float32)
    newImg = sitk.GetImageFromArray(savedImg)
    newImg.SetOrigin(origin)
    newImg.SetDirection(direction)
    newImg.SetSpacing(xyz_thickness)
    sitk.WriteImage(newImg, saved_name)
def Dice(output2, target):
    """Per-volume Dice score for the tumor class.

    `output2` is the network's per-class output (argmax over dim 1 gives the
    predicted class index per voxel); `target` is the integer label volume
    with a singleton channel dim, where label value 2 marks tumor.
    NOTE(review): pred_lesion holds class INDICES, not a `== tumor` mask --
    presumably class index 1 corresponds to label 2; confirm with metric.dc.
    """
    pred_lesion = np.argmax(output2.detach().cpu().numpy(), axis=1)
    target = np.squeeze(target.detach().cpu().numpy(), axis=1)
    true_lesion = target == 2
    # Compute per-case (per patient volume) dice.
    if not np.any(pred_lesion) and not np.any(true_lesion):
        # Both empty: define the score as perfect agreement.
        tumor_dice = 1.
        print('tumor_dice = 1')
    else:
        tumor_dice = metric.dc(pred_lesion, true_lesion)
    return tumor_dice
def one_hot(scores, labels):
    """Scatter integer class labels into a one-hot tensor shaped like `scores`.

    `labels` carries the class index in a singleton dim 1; the matching
    channel of the result is set to 1 and everything else stays 0.
    """
    encoded = torch.zeros_like(scores)
    encoded.scatter_(dim=1, index=labels.long(), value=1)
    return encoded
class DiceLoss(nn.Module):
    """Soft Dice loss computed against the tumor class (label value 2)."""

    def __init__(self):
        super().__init__()
        self.smooth = 1e-5  # keeps the ratio finite for empty volumes

    def forward(self, output2, target):
        # Collapse labels into a binary tumor mask: 2 -> 1, {0, 1} -> 0.
        tumor_mask = target.clone()
        tumor_mask[target == 2] = 1
        tumor_mask[target <= 1] = 0
        target2 = one_hot(output2, tumor_mask)
        # Soft Dice coefficient over all voxels and channels.
        overlap = 2. * (output2 * target2).sum()
        total = output2.sum() + target2.sum()
        coeff = (overlap + self.smooth) / (total + self.smooth)
        return 1 - coeff
class PostRes(nn.Module):
    """3D residual block: two 3x3x3 convs with InstanceNorm, plus an identity
    (or 1x1x1 projection) shortcut, followed by a ReLU."""

    def __init__(self, n_in, n_out, stride=1):
        super(PostRes, self).__init__()
        # Main branch: conv -> IN -> ReLU -> conv -> IN.
        self.resBlock = nn.Sequential(
            nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1),
            nn.InstanceNorm3d(n_out),
            nn.ReLU(inplace=True),
            # nn.PReLU(),
            nn.Conv3d(n_out, n_out, kernel_size=3, padding=1),
            nn.InstanceNorm3d(n_out)
        )
        self.relu = nn.ReLU(inplace=True)
        # self.prelu = nn.PReLU()
        # Project the input whenever the spatial size or channel count changes.
        if stride == 1 and n_in == n_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Sequential(
                nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride),
                nn.InstanceNorm3d(n_out))

    def forward(self, x):
        skip = x if self.shortcut is None else self.shortcut(x)
        out = self.resBlock(x)
        out += skip
        return self.relu(out)
class Decoder2(nn.Module):
    """U-Net style decoder: four 2x transpose-conv upsampling stages, each
    followed by a skip-connection concat and a stack of PostRes blocks, then
    a 1x1x1 output head with per-voxel softmax over 2 classes.

    forward() expects encoder feature maps layer1..layer5 whose channel
    counts match nff[1..5] -- the matching encoder is defined elsewhere.
    """
    def __init__(self):
        super().__init__()
        self.num_blocks_back = [3, 3, 2, 2] # residual blocks per decoder stage [4..1]
        self.nff = [1, 8, 16, 32, 64, 128] # encoder (forward) feature counts [0-5]
        self.nfb = [64, 32, 16, 8, 2] # decoder (backward) feature counts, deep to shallow
        # Upsampling stages deconv4..deconv1 (2x2x2 transpose conv, stride 2)
        # and the 1x1x1 output head.
        self.deconv4 = nn.Sequential(
            nn.ConvTranspose3d(self.nff[5], self.nfb[0], kernel_size=2, stride=2),
            nn.InstanceNorm3d(self.nfb[0]),
            nn.ReLU(inplace=True)
            # nn.PReLU()
        )
        self.deconv3 = nn.Sequential(
            nn.ConvTranspose3d(self.nfb[0], self.nfb[1], kernel_size=2, stride=2),
            nn.InstanceNorm3d(self.nfb[1]),
            nn.ReLU(inplace=True)
            # nn.PReLU()
        )
        self.deconv2 = nn.Sequential(
            nn.ConvTranspose3d(self.nfb[1], self.nfb[2], kernel_size=2, stride=2),
            nn.InstanceNorm3d(self.nfb[2]),
            nn.ReLU(inplace=True)
            # nn.PReLU()
        )
        self.deconv1 = nn.Sequential(
            nn.ConvTranspose3d(self.nfb[2], self.nfb[3], kernel_size=2, stride=2),
            nn.InstanceNorm3d(self.nfb[3]),
            nn.ReLU(inplace=True)
            # nn.PReLU()
        )
        self.output = nn.Sequential(
            nn.Conv3d(self.nfb[3], self.nfb[3], kernel_size=1),
            nn.InstanceNorm3d(self.nfb[3]),
            nn.ReLU(inplace=True),
            # nn.PReLU(),
            # nn.Dropout3d(p = 0.3),
            nn.Conv3d(self.nfb[3], self.nfb[4], kernel_size=1)) # since class number = 3 and split into 2 branch
        # Residual stacks backward4..backward1; the first block of each stack
        # halves the doubled (skip-concatenated) channel count.
        for i in range(len(self.num_blocks_back)):
            blocks = []
            for j in range(self.num_blocks_back[i]):
                if j == 0:
                    blocks.append(PostRes(self.nfb[i] * 2, self.nfb[i]))
                else:
                    blocks.append(PostRes(self.nfb[i], self.nfb[i]))
            setattr(self, 'backward' + str(4-i), nn.Sequential(*blocks))
        self.drop = nn.Dropout3d(p=0.5, inplace=False)  # defined but not used in forward()
        self.softmax = nn.Softmax(dim=1)#(NCDHW): softmax over the class channel
    def forward(self, layer1, layer2, layer3, layer4, layer5):
        # decoder: upsample, concatenate the encoder skip, refine with PostRes.
        up4 = self.deconv4(layer5)
        cat_4 = torch.cat((up4, layer4), 1)
        layer_4 = self.backward4(cat_4)
        up3 = self.deconv3(layer_4)
        cat_3 = torch.cat((up3, layer3), 1)
        layer_3 = self.backward3(cat_3)
        up2 = self.deconv2(layer_3)
        cat_2 = torch.cat((up2, layer2), 1)
        layer_2 = self.backward2(cat_2)
        up1 = self.deconv1(layer_2)
        cat_1 = torch.cat((up1, layer1), 1)
        layer_1 = self.backward1(cat_1)
        layer_1 = self.output(layer_1)
        layer_1 = self.softmax(layer_1)
        return layer_1
class TumorNet(nn.Module):
    """3-D encoder-decoder segmentation network (U-Net style).

    The encoder is a conv stem (forward1) followed by four residual stages
    (forward2..forward5) separated by 2x max-pooling; Decoder2 consumes all
    five feature maps via skip connections and emits a 2-channel softmax map.
    """

    def __init__(self):
        super(TumorNet, self).__init__()
        self.nff = [1, 8, 16, 32, 64, 128]  # channel widths per encoder level
        self.num_blocks_forw = [2, 2, 3, 3]  # residual blocks in forward2..forward5

        # Stem: two plain conv layers at full resolution.
        self.forward1 = nn.Sequential(
            nn.Conv3d(self.nff[0], self.nff[1], kernel_size=3, padding=1),
            nn.InstanceNorm3d(self.nff[1]),
            nn.ReLU(inplace=True),
            nn.Conv3d(self.nff[1], self.nff[1], kernel_size=3, padding=1),
            nn.InstanceNorm3d(self.nff[1]),
            nn.ReLU(inplace=True))

        # forward2..forward5: residual stages of growing width.
        for stage, depth in enumerate(self.num_blocks_forw):
            stack = [PostRes(self.nff[stage + 1], self.nff[stage + 2])]
            stack += [PostRes(self.nff[stage + 2], self.nff[stage + 2]) for _ in range(depth - 1)]
            setattr(self, 'forward' + str(stage + 2), nn.Sequential(*stack))

        self.avgpool = nn.AvgPool3d(kernel_size=2, stride=2)  # kept for compatibility; unused in forward
        self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)
        self.decoder2 = Decoder2()
        self.drop = nn.Dropout3d(p=0.5, inplace=False)        # kept for compatibility; unused in forward

    def forward(self, input):
        # Encoder: collect the feature map of every level for the skip connections.
        features = [self.forward1(input)]
        for stage in (self.forward2, self.forward3, self.forward4, self.forward5):
            features.append(stage(self.maxpool(features[-1])))
        # Decoder consumes (layer1, layer2, layer3, layer4, layer5).
        return self.decoder2(*features)
def main():
    """Smoke test: instantiate TumorNet on the GPU and print a layer summary."""
    net = TumorNet().cuda()  # torchsummary summarises the model on the device it lives on, hence .cuda()
    from torchsummary import summary
    summary(net, input_size=(1,64,256,256))  # input_size excludes the batch dimension N: (C, D, H, W)
    # input = torch.randn([1,1,64,256,256]).cuda()#(NCDHW)
    # output = net(input)
    # print(output.shape)
    # print('############net.named_parameters()#############')
    # for name, param in net.named_parameters():
    #     print(name)
# Run the smoke test only when this file is executed directly.
if __name__ == '__main__':
    main()
"lihuiyu23@gmail.com"
] | lihuiyu23@gmail.com |
637aebc9dc0ee30985a63efc692a3f892fbed308 | c6f9a46393048add6fad888d382978b9be12dd4c | /python/ql/test/experimental/dataflow/strange-pointsto-interaction-investigation/src/urandom_problem.py | d4a06529cf60991084b7d954d234703134c192b9 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | luchua-bc/ql | 6e9480e8c92cbb12570fcc7f65366bfdd54dad06 | a1d9228a66cb80329041fa8d95b08ce5697dec54 | refs/heads/master | 2023-01-23T17:11:54.776916 | 2022-07-20T14:36:37 | 2022-07-20T14:36:37 | 248,313,302 | 4 | 0 | MIT | 2023-01-16T09:13:30 | 2020-03-18T18:35:48 | CodeQL | UTF-8 | Python | false | false | 920 | py | # These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"  # benign value: must never be reported as tainted
SOURCE = "source"  # canonical tainted value tracked by the dataflow tests below
def is_source(x):
    """Return True when *x* equals one of the sentinel "source" values."""
    return x in ("source", b"source", 42, 42.0, 42j)
def SINK(x):
    """Expected sink: a source value arriving here prints OK."""
    if not is_source(x):
        print("Unexpected flow", x)
    else:
        print("OK")
def SINK_F(x):
    """Forbidden sink: a source value arriving here is a flow error."""
    if not is_source(x):
        print("OK")
    else:
        print("Unexpected flow", x)
# ------------------------------------------------------------------------------
# Actual tests
# ------------------------------------------------------------------------------
def give_src():
    return SOURCE  # hands the tainted SOURCE value to the callers below
foo = give_src()  # taint enters via the helper above
SINK(foo) # $ flow="SOURCE, l:-3 -> foo"
import os  # imported mid-module on purpose; os.urandom is the unresolved call below
cond = os.urandom(1)[0] > 128 # $ unresolved_call=os.urandom(..)
if cond:  # NOTE(review): the duplicated reads of `cond` look deliberate,
    pass
if cond:  # exercising the points-to / dataflow interaction under investigation
    pass
foo = give_src() # $ unresolved_call=give_src()
SINK(foo) # $ unresolved_call=SINK(..) MISSING: flow="SOURCE, l:-15 -> foo"
| [
"rasmuswl@github.com"
] | rasmuswl@github.com |
cc878c320008f8db66aa030c2f2f6bc3e205a9cc | 6d1728bf105a7d6481d0bbca2b88f4478e0632d9 | /study/ch1/area.py | 1a498690da37f4f891110371603717db2e529035 | [] | no_license | Phantomn/Python | 00c63aceb2d4aa0db71fe5e33fe8b5159b41aadd | 12808adf4b52c60cfe94befb6daa1e8187224beb | refs/heads/Python | 2022-11-09T16:49:49.165884 | 2019-08-05T07:30:07 | 2019-08-05T07:30:07 | 44,149,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | horizon=0
# Prompt for the two side lengths of a rectangle and print its area.
# Fixes: the dead `vertical=0` initialiser is removed, and each prompt is
# passed directly to input() instead of a separate print(..., end="").
horizon = int(input("Input horizon length : "))
vertical = int(input("Input vertical length : "))
print("rectangle is %d." % (horizon * vertical))
| [
"tmdvyr123@naver.com"
] | tmdvyr123@naver.com |
bd8527aee37e224f869349bec2f6fb2bdadc1d5b | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/예외처리_20200709144804.py | 9b8a16ecb397905296a8e33b88abcd084eadb309 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | try:
print("나누기 전용 계산기입니다.")
num1 = int(input("첫 번째 숫자를 입력하세요 : "))
num2 = int(input("두 번째 숫자를 입력하세요 : "))
print("{0} / {1} = {2}".format(n)) | [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
803d49b2b49af27d2ac57b3a4e8ff335cdd579a8 | c96eab97976aa7fa60320d8b7de74f5148c7bf25 | /edf/g1997.py | 02cd9c1f7ee960c1bd6fc5b13dd077532754084c | [] | no_license | alexis-roche/scripts | 869eb9063e8b31a0e13284aeb777cc152f822f02 | aaae389a3fa5a0c6ff619034bdc5825a5f77a995 | refs/heads/master | 2021-01-02T23:07:33.730063 | 2012-05-08T06:59:06 | 2012-05-08T06:59:06 | 1,047,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | from game import Game
# 1997 matches of the French national side, encoded as
# (date, opponent, competition, venue, goals for, goals against,
#  starting eleven, substitutes used).
_fixtures_1997 = [
    ('22 Jan 1997', 'portugal', 'f', 'away', 2, 0,
     ['barthez', 'thuram', 'blanc', 'desailly', 'laigle', 'karembeu',
      'deschamps', 'zidane', 'ba', 'dugarry', 'pires'],
     ['ngotty', 'djorkaeff', 'loko', 'blondeau']),
    ('26 Feb 1997', 'netherlands', 'f', 'home', 2, 1,
     ['lama', 'thuram', 'blanc', 'desailly', 'lizarazu', 'karembeu',
      'vieira', 'zidane', 'laigle', 'ba', 'dugarry'],
     ['candela', 'ngotty', 'pires', 'loko']),
    ('2 Apr 1997', 'sweden', 'f', 'home', 1, 0,
     ['barthez', 'thuram', 'blanc', 'desailly', 'candela', 'ba',
      'makelele', 'zidane', 'vieira', 'djorkaeff', 'dugarry'],
     ['blondeau', 'keller', 'gava', 'djetou', 'loko']),
    ('3 Jun 1997', 'brazil', 'f', 'home', 1, 1,
     ['barthez', 'candela', 'blanc', 'desailly', 'lizarazu', 'karembeu',
      'deschamps', 'zidane', 'ba', 'maurice', 'pires'],
     ['thuram', 'vieira', 'keller']),
    ('7 Jun 1997', 'england', 'f', 'home', 0, 1,
     ['barthez', 'thuram', 'blanc', 'ngotty', 'laigle', 'djorkaeff',
      'vieira', 'deschamps', 'dugarry', 'ouedec', 'keller'],
     ['lizarazu', 'zidane', 'loko']),
    ('11 Jun 1997', 'italy', 'f', 'home', 2, 2,
     ['charbonnier', 'thuram', 'leboeuf', 'desailly', 'lizarazu', 'ba',
      'karembeu', 'zidane', 'deschamps', 'dugarry', 'maurice'],
     ['ngotty', 'vieira', 'djorkaeff']),
    ('11 Oct 1997', 'south africa', 'f', 'home', 2, 1,
     ['letizi', 'thuram', 'blanc', 'desailly', 'candela', 'deschamps',
      'djorkaeff', 'petit', 'pires', 'guivarch', 'henry'],
     ['laigle', 'ba', 'boghossian', 'zidane']),
    ('12 Nov 1997', 'scotland', 'f', 'home', 2, 1,
     ['barthez', 'thuram', 'blanc', 'desailly', 'laigle', 'ba',
      'deschamps', 'zidane', 'petit', 'laslandes', 'guivarch'],
     ['candela', 'gava', 'boghossian', 'djorkaeff']),
]

g1997 = []
for _date, _opponent, _competition, _venue, _scored, _conceded, _players, _subs in _fixtures_1997:
    g = Game(_date, _opponent, _competition, _venue, _scored, _conceded)
    g.players = _players
    g.subs = _subs
    g1997.append(g)
"alexis.roche@gmail.com"
] | alexis.roche@gmail.com |
0f983bfba296e9069d60f2ce24011761d28fcdc7 | fcfc18039d05878f6156536a6757832a3576147b | /Final_Project_Code_IS590PR_Functions.py | 47773ad62be23389aa8a872651a9c749d91fc25a | [] | no_license | rahulrohri/IS590PR-Spring-2020-Final-Project | 76fa6d8a0a56e39c155a0cc718ce0f06bd14d225 | f101cb2ceacf83c3e24fb07f63e5456497e8d116 | refs/heads/master | 2022-07-07T12:51:07.558538 | 2020-05-13T11:45:13 | 2020-05-13T11:45:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,195 | py | """
IS590 PR - University of Illinois - Urbana Champaign
This file contains the helper functions written for our project; it is
intended to support the Jupyter notebook titled Final_Project_Code_IS590PR.ipynb
Name: NYC_Public_Safety_Functions.py
Team Members :
Megha Manglani (GitHub id – meghamm2)
Rahul Ohri (GitHub id- rahulrohri)
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Locations of the raw CSV extracts used throughout this module.
# NOTE(review): `my_dir` is a hard-coded absolute Windows path — anyone else
# running this file must edit it to point at their own DataSets directory.
my_dir = 'C:/Users/rahul/Downloads/UIUC/Sem 2 - Spring 2020/Courses/Programing Analytics/Final Project/DataSets/' #https://github.com/iSchool-590pr/PR_Sp20_examples/blob/master/week_07/class7_pandas_pt2.ipynb
NYPD_Arrests = my_dir +'NYPD_Arrests_Data__Historic_.csv' # Loading NYPD Arrest Data file
Complaints = my_dir +'NYPD_Complaint_Data_Historic.csv' # Loading NYPD Complaints Data file
EMS_incident = my_dir +'EMS_Incident_Dispatch_Data.csv' # Loading EMS incident dispatch Data file
def dataset_validation():
    """Check that each input CSV file has the columns our hypotheses need.

    Only the header row of each file is read (``nrows=1``) because the files
    are very large.  For every dataset a confirmation line is printed when all
    required columns are present; otherwise a message with download links is
    printed instead.  The file paths are taken from the module-level
    ``EMS_incident``, ``Complaints`` and ``NYPD_Arrests`` variables.

    NOTE(review): the previous version duplicated the whole read/check/print
    sequence three times and carried doctests that reassigned doctest-local
    path names — which never affected this function (it reads module
    globals) — so the logic is now table-driven and the doctests dropped.
    """
    checks = [
        (EMS_incident,
         ['INCIDENT_RESPONSE_SECONDS_QY', 'INCIDENT_DATETIME', 'BOROUGH'],
         "EMS",
         "The columns necessary for analysis are not present in the EMS data file. Kindly download the dataset files from - https://drive.google.com/open?id=1g_StaWiaWQyNjNOu3wlFKG2dsIJZjyjF or the latest file from https://data.cityofnewyork.us/Public-Safety/EMS-Incident-Dispatch-Data/76xm-jjuj"),
        (Complaints,
         ['CMPLNT_NUM', 'CMPLNT_FR_DT', 'BORO_NM', 'VIC_RACE', 'OFNS_DESC'],
         "Complaints",
         "The columns necessary for analysis are not present in the Complaints data file. Kindly download the dataset files from - https://drive.google.com/open?id=112LOH-fYjUn5AHVnFbgQYRAAhSCcXvjq or the latest file from https://data.cityofnewyork.us/Public-Safety/NYPD-Complaint-Data-Historic/qgea-i56i "),
        (NYPD_Arrests,
         ['ARREST_BORO', 'ARREST_DATE', 'ARREST_KEY'],
         "Arrests",
         "The columns necessary for analysis are not present in the Arrests data file. Kindly download the dataset files from - https://drive.google.com/open?id=1g_StaWiaWQyNjNOu3wlFKG2dsIJZjyjF or the latest file from https://catalog.data.gov/dataset/nypd-arrests-data-historic"),
    ]
    for path, required_cols, label, missing_msg in checks:
        header = pd.read_csv(path, nrows=1)  # header row only; the files are huge
        if all(column in header.columns for column in required_cols):
            print("The columns necessary for analysis are present in the " + label + " data file")
        else:
            print(missing_msg)
def get_file(file, cols) -> pd.DataFrame:
    """Interactively load 2-4 named columns of a CSV file into a DataFrame.

    Prompts the user once per column name, then reads only those columns so
    that the very large source files stay memory-friendly.

    :param file: path (or file-like object) accepted by ``pandas.read_csv``
    :param cols: how many column names to prompt for (2, 3 or 4)
    :return: DataFrame restricted to the requested columns, or the string
             'Invalid number of columns' when *cols* is unsupported
    """
    if cols not in (2, 3, 4):
        return "Invalid number of columns"
    # The cols == 4 branch historically used slightly different prompt
    # wording ("inside"); both wordings are preserved verbatim.
    prompt = ('enter your column name {} inside and press enter:' if cols == 4
              else 'enter your column name {} and press enter:')
    col_list = [input(prompt.format(number)) for number in range(1, cols + 1)]
    return pd.read_csv(file, usecols=col_list)  # read only the needed columns
# Helper for splitting a mm/dd/yyyy date column into month and year columns.
def extract_year_month(x, old_col: str, month_column: str, year_column: str):
    """Split a mm/dd/yyyy string column into separate month and year columns.

    Two new columns are added to *x* in place: *month_column* receives the
    two-character month and *year_column* the four-character year.

    :param x: DataFrame to modify (also returned, for chaining)
    :param old_col: name of the column holding dates formatted mm/dd/yyyy
    :param month_column: name of the month column to create
    :param year_column: name of the year column to create
    :return: the same DataFrame with the two extra columns
    """
    x[month_column] = x[old_col].str.slice(0, 2)
    x[year_column] = x[old_col].str.slice(6, 10)
    return x
def get_arrest_or_crime_count(dfname, col_year, col_month, col_boro, col_key) -> pd.core.frame.DataFrame:
    """Count records per (borough, year, month) for years after 2005.

    Rows with a missing year are dropped, the year column is coerced to
    integers, and the remaining records are grouped by borough, year and
    month with a ``count`` aggregation over *col_key*.

    :param dfname: DataFrame holding one record per arrest/complaint
    :param col_year: column with the (possibly missing) year value
    :param col_month: column with the month value
    :param col_boro: column with the borough / area name
    :param col_key: unique-key column whose occurrences are counted
    :return: multi-index DataFrame of counts per borough/year/month
    """
    cleaned = dfname.dropna(subset=[col_year]).astype({col_year: 'int64'})
    recent = cleaned[cleaned[col_year] > 2005]  # the analysis window starts in 2006
    return recent.groupby([col_boro, col_year, col_month]).agg({col_key: ['count']})
def plot_graph(n, df, l, b, var1: str, var2: str, var3: str, var4: str, var5: str, constant: str):
    """Draw one line-plot subplot per first-level index group of *df*.

    Each of *var1*..*var5* selects a cross-section via ``df.xs`` and is drawn
    on its own axis, titled "<key> - <constant>".

    :param n: number of vertically stacked subplots (expected to be 5; a
              smaller value raises IndexError, as before)
    :param df: multi-index DataFrame whose first level holds the group keys
    :param l: figure width in inches
    :param b: figure height in inches
    :param var1: first-level index value to plot (likewise var2..var5)
    :param constant: text appended to every subplot title
    """
    figure, axes = plt.subplots(n, 1, figsize=(l, b))
    for position, key in enumerate((var1, var2, var3, var4, var5)):
        df.xs(key).plot(kind='line', ax=axes[position]).set_title(key + ' - ' + constant)
def race_percentage(row, colname):
    """Return the NYC population share for the race stored in ``row[colname]``.

    Shares are approximations for a total NYC population of ~8,175,133
    (https://worldpopulationreview.com/us-cities/new-york-city-population/).
    Intended for use with ``DataFrame.apply(..., axis=1)``.

    Fixes: the original return annotation claimed ``pd.core.series.Series``
    although a scalar is returned, and a 7-way if-chain is replaced by a
    dictionary lookup with the same implicit-None fallback.

    :param row: mapping/Series holding one record
    :param colname: name of the column containing the race label
    :return: population share as a float in [0, 1], or None if the label
             is not recognised
    """
    shares = {
        'AMERICAN INDIAN/ALASKAN NATIVE': 0.0043,
        'ASIAN / PACIFIC ISLANDER': 0.14,
        'BLACK': 0.2195,
        'BLACK HISPANIC': 0.0233,
        'WHITE': 0.3214,
        'WHITE HISPANIC': 0.1053,
        'UNKNOWN/OTHER': 0.1862,
    }
    return shares.get(row[colname])
def offense_per_victim_race(dataframe_name) -> pd.DataFrame:
    """Aggregate complaints by offense and victim race, normalising counts
    by each race's share of the NYC population.

    Missing or 'OTHER'/'UNKNOWN' victim races are folded into a single
    'UNKNOWN/OTHER' bucket, only person-harming offenses are kept, and the
    complaint count is divided by the race's population share
    (see ``race_percentage``) to give 'Normalized results'.

    Fix: the original called ``.apply(race_percentage)`` twice and discarded
    the first result; the redundant pass is removed.

    :param dataframe_name: complaints DataFrame with VIC_RACE, OFNS_DESC and
                           CMPLNT_NUM columns (VIC_RACE is filled in place)
    :return: DataFrame with one row per (offense, victim race) pair
    """
    dataframe_name.VIC_RACE = dataframe_name.VIC_RACE.fillna('UNKNOWN')  # replacing nans with 'UNKNOWN'
    dataframe_name = dataframe_name.replace({'VIC_RACE': 'UNKNOWN'}, 'UNKNOWN/OTHER')
    dataframe_name = dataframe_name.replace({'VIC_RACE': 'OTHER'}, 'UNKNOWN/OTHER')
    # Selecting only a particular set of crimes that involve harming another human
    type_of_crime = ['HARRASSMENT 2', 'BURGLARY', 'ROBBERY', 'FELONY ASSAULT', 'SEX CRIMES', 'OFFENSES INVOLVING FRAUD',
                     'RAPE', 'THEFT-FRAUD', 'MURDER & NON-NEGL. MANSLAUGHTER', 'KIDNAPPING & RELATED OFFENSES',
                     'OFFENSES RELATED TO CHILDREN', 'KIDNAPPING', 'OTHER OFFENSES RELATED TO THEF', 'PETIT LARCENY',
                     'GRAND LARCENY', 'FORGERY', 'FRAUDS', 'ASSAULT 3 & RELATED OFFENSES']
    dataframe_name = dataframe_name[dataframe_name.OFNS_DESC.isin(type_of_crime)]
    dataframe_name = dataframe_name.groupby(["OFNS_DESC", "VIC_RACE"], as_index=False).count()
    dataframe_name = dataframe_name[['OFNS_DESC', 'VIC_RACE', 'CMPLNT_NUM']]
    dataframe_name['race_percentage'] = dataframe_name.apply(lambda row: race_percentage(row, 'VIC_RACE'), axis=1)
    dataframe_name['Normalized results'] = (
        dataframe_name['CMPLNT_NUM'] / dataframe_name['race_percentage']).astype('int64')
    return dataframe_name
def population_density_details(filename) -> pd.core.frame.DataFrame:
    """Build a per-borough population-density table from the population CSV.

    Rows with ``Year <= 2000`` are discarded, populations are summed per
    borough, and each sum is divided by a hand-entered borough area in sq. km
    (https://en.wikipedia.org/wiki/Demographics_of_New_York_City).

    NOTE(review): the area list assumes exactly the five NYC boroughs appear,
    in alphabetical order — TODO confirm for any new input file.

    :param filename: path or file-like object with Borough/Year/Population columns
    :return: DataFrame with BORO_NM, Population, area and integer density
    """
    census = pd.read_csv(filename)
    census = census[census['Year'] > 2000]
    density = census.groupby(['Borough'])['Population'].sum().to_frame().reset_index()
    # Areas aligned with the alphabetically sorted borough names.
    density['Area in sq. km'] = [109.04, 183.42, 59.13, 281.09, 151.18]
    density['Population Density'] = (density['Population'] / density['Area in sq. km']).astype('int64')
    density['Borough'] = density['Borough'].str.upper()
    return density.rename(columns={"Borough": "BORO_NM"})
def corr_coeff(col1, col2) -> np.float64:
    """Scatter-plot two series and return their Pearson correlation.

    :param col1: first pandas Series
    :param col2: second pandas Series
    :return: Pearson correlation coefficient between the two series
    """
    plt.scatter(col1, col2)  # side effect: draws the scatter on the current figure
    return col1.corr(col2)
# Module-level setup: precompute the borough population-density table used by
# get_crime_results() and EMS_details().
# NOTE(review): this runs at import time and reads a CSV from the local my_dir path.
NYC_Population = my_dir + 'New_York_City_Population.csv' # Loading the NYC population data file
Pop_density_df = population_density_details(NYC_Population)
def get_crime_results(dfname) -> pd.core.frame.DataFrame:
    """Compute complaints per capita for each borough and person-harming offense.

    Filters the complaints frame to a fixed list of offenses against people,
    counts complaints per (offense, borough), joins the module-level
    ``Pop_density_df`` population table and derives a per-capita rate.

    :param dfname: NYC complaints DataFrame (needs OFNS_DESC, BORO_NM, CMPLNT_NUM)
    :return: DataFrame of complaint counts, population figures and a
             'complaints per capita' column per offense/borough pair
    """
    # Offenses that involve harming another human (kept local so the
    # function stands alone; same list as offense_per_victim_race).
    type_of_crime = ['HARRASSMENT 2', 'BURGLARY', 'ROBBERY', 'FELONY ASSAULT', 'SEX CRIMES',
                     'OFFENSES INVOLVING FRAUD', 'RAPE', 'THEFT-FRAUD',
                     'MURDER & NON-NEGL. MANSLAUGHTER', 'KIDNAPPING & RELATED OFFENSES',
                     'OFFENSES RELATED TO CHILDREN', 'KIDNAPPING', 'OTHER OFFENSES RELATED TO THEF',
                     'PETIT LARCENY', 'GRAND LARCENY', 'FORGERY', 'FRAUDS',
                     'ASSAULT 3 & RELATED OFFENSES']
    harmful = dfname[dfname.OFNS_DESC.isin(type_of_crime)]
    counts = harmful.groupby(["OFNS_DESC", "BORO_NM"], as_index=False).count()
    counts = counts[["OFNS_DESC", "BORO_NM", "CMPLNT_NUM"]]
    # Attach population / density figures for each borough.
    Crime_result_df = pd.merge(counts, Pop_density_df, how='left', left_on='BORO_NM', right_on='BORO_NM')
    Crime_result_df['complaints per capita'] = (
        Crime_result_df['CMPLNT_NUM'] / Crime_result_df['Population']).astype('float64')
    return Crime_result_df
#EMS_incident_response_avg = EMS_Data[['INCIDENT_RESPONSE_SECONDS_QY','BOROUGH']]
def EMS_details(dfname) -> pd.DataFrame:
    """Average EMS incident-response time per borough, year and month, joined
    with the module-level ``Pop_density_df`` population-density table.

    :param dfname: EMS dispatch DataFrame (needs BOROUGH, Incident_Year,
                   Incident_Month, INCIDENT_RESPONSE_SECONDS_QY)
    :return: DataFrame with the mean response time (seconds and minutes) plus
             the borough population-density columns
    """
    avg_response = (dfname
                    .groupby(['BOROUGH', 'Incident_Year', 'Incident_Month'], as_index=False)
                    ['INCIDENT_RESPONSE_SECONDS_QY'].mean())
    avg_response['AVG_INCIDENT_RESPONSE (Minutes)'] = avg_response['INCIDENT_RESPONSE_SECONDS_QY'] / 60
    # Normalise the borough label so it matches the population table.
    avg_response = avg_response.replace({'BOROUGH': 'RICHMOND / STATEN ISLAND'}, 'STATEN ISLAND')
    return pd.merge(Pop_density_df, avg_response, how='inner', left_on='BORO_NM', right_on='BOROUGH')
"noreply@github.com"
] | rahulrohri.noreply@github.com |
6b9e64cf6ae66302800054f71df69a98d2400cb1 | 44754d4b76b59c91ec700e4b07fc7c20930eedd2 | /app.py | 3a5cc4daa936e681a3271a060c3939094f3e2ce7 | [
"MIT"
] | permissive | ghostcat404/simple_ml_model | f18b8e18a06b30ffc46e7b709473a6793f2ff335 | 0a65fb4a376d1781d356dbcedd5335f3d6803ab1 | refs/heads/master | 2023-06-10T18:30:07.288304 | 2021-06-17T23:39:57 | 2021-06-17T23:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | import logging
import argparse
from textwrap import dedent
import mlflow
from sklearn.pipeline import Pipeline
from dotenv import load_dotenv
from utils import ModelMapper, DatasetMapper, save_model, get_model_params
# TODO: implement applying the trained model (inference); only training is supported so far
def run_train(args):
    """Train the selected model on the iris dataset and log the run to MLflow.

    Reads optional hyper-parameters from ``args.params_path``, wraps the
    model in a sklearn Pipeline, logs params/model to the hard-coded MLflow
    tracking server and optionally saves the fitted model to disk.

    :param args: parsed argparse namespace from the 'train' sub-command
    """
    params = None
    if args.params_path is not None:
        params = get_model_params(args.params_path)['params']

    model = Pipeline([
        ('estimator', ModelMapper.get_model(args.model_type)(params))
    ])
    logging.info('Load model %s', model)

    X, y = DatasetMapper.get_data('iris')
    logging.info('Fit model')

    mlflow.set_tracking_uri("http://194.67.111.68:5000")  # NOTE(review): hard-coded tracking server
    mlflow.set_experiment(args.exp_name)
    with mlflow.start_run() as run:
        model.fit(X, y)
        if params:
            mlflow.log_params(params)
        mlflow.sklearn.log_model(model, artifact_path="model")

    if args.model_name is not None:
        logging.info('Save %s to %s', model, args.model_name)
        save_model(args.model_name, model)
def setup_parser(parser: argparse.ArgumentParser):
    """Attach the ``train`` sub-command and its options to *parser*."""
    subparsers = parser.add_subparsers(
        help='Choose command. Type <command> -h for more help'
    )

    train = subparsers.add_parser(
        'train',
        help='train choosen model',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    train.add_argument(
        '--model',
        dest='model_type',
        type=str,
        required=True,
        help=dedent('''
        Choose model type
        Available types:
            - logistic: sklearn.linear_models.LogisticRegression
        '''),
    )
    train.add_argument(
        '--config-params-path',
        dest='params_path',
        type=str,
        required=False,
        default=None,
        help='path to model params .yml file',
    )
    train.add_argument(
        '--model-name',
        dest='model_name',
        type=str,
        required=False,
        default=None,
        help='name of model',
    )
    train.add_argument(
        '--exp-name',
        dest='exp_name',
        type=str,
        required=False,
        default='test_exp',
        help='name of experiment',
    )
    # Dispatch handler for this sub-command.
    train.set_defaults(callback=run_train)
def main():
    """CLI entry point: load .env, parse arguments, dispatch the sub-command."""
    load_dotenv()
    arg_parser = argparse.ArgumentParser('Simple ML project')
    setup_parser(arg_parser)
    parsed = arg_parser.parse_args()
    parsed.callback(parsed)


if __name__ == '__main__':
    main()
| [
"aechesnov@yandex.ru"
] | aechesnov@yandex.ru |
c03e0187f206d06f07e5771f3ff8b322dcdba6cf | ce387fc31007f0616b6f2805bf998ae5f6288224 | /qubole_assembly/configure_airflow.py | aa05c8e46596fa8911dbf9ea0464dd358c76d091 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | harishjami1382/test2 | d3466604209377e899a239c4f29446ffef06684e | f778cc7290904a84bed06f65fa5dbb49a63639f0 | refs/heads/master | 2023-02-25T18:44:21.114158 | 2021-02-04T10:19:50 | 2021-02-04T10:19:50 | 335,915,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,826 | py | try:
import ConfigParser
except:
import configparser as ConfigParser
import traceback
import sys
import os
from optparse import OptionParser
import base64
import subprocess
def handle_specific_config(overrides_map, options):
    """Apply Qubole cluster defaults to the Airflow config overrides.

    Mutates *overrides_map* in place (sqlalchemy connection, webserver
    port/base_url, celery broker/result backend) and reports which
    cluster-hosted services must be started.

    :param overrides_map: dict of {section: {option: value}} overrides.
    :param options: parsed CLI options (not consulted here).
    :return: tuple of booleans (use_cluster_broker_airflow,
             use_celery_airflow, use_cluster_datastore).
    """
    # Handle sqlalchemy settings: fall back to the cluster-local postgres
    # when the user did not supply their own connection string.
    use_cluster_datastore = False
    core_settings = overrides_map.get('core', {})
    logging_settings = overrides_map.get('logging', {})
    if 'remote_base_log_folder' not in logging_settings or logging_settings['remote_base_log_folder'] is None or logging_settings['remote_base_log_folder'] == "":
        logging_settings['remote_base_log_folder'] = os.getenv('AIRFLOW_LOGS_LOCATION', "")
    if 'logging' not in overrides_map.keys():
        overrides_map['logging'] = logging_settings
    cluster_id = os.getenv('CLUSTER_ID', "")
    qubole_base_url = os.getenv('QUBOLE_BASE_URL', "api.qubole.com")
    if 'sql_alchemy_conn' not in core_settings or core_settings['sql_alchemy_conn'] is None or core_settings['sql_alchemy_conn'] == "":
        core_settings['sql_alchemy_conn'] = "postgresql://root:" + cluster_id + "@localhost:5432/airflow"
        use_cluster_datastore = True

    # Handle webserver settings
    web_server_port = '8080'
    if 'webserver' not in overrides_map:
        overrides_map['webserver'] = {}
    # user controlled port will be bad idea, keeping it 8080 only
    overrides_map['webserver']['web_server_port'] = web_server_port
    if 'base_url' not in overrides_map['webserver']:
        # Ideally we should not accept any overrides for base url; this is
        # temporary as sometimes we have to manually set up a multi-node
        # cluster using various one-node clusters.
        overrides_map['webserver']['base_url'] = qubole_base_url + "/airflow-rbacwebserver-" + cluster_id

    # Handle celery executor settings
    default_broker_url = 'amqp://guest:guest@localhost:5672/'
    use_cluster_broker_airflow = True
    use_celery_airflow = True
    if overrides_map.get('core', {}).get('executor', None) == 'CeleryExecutor':
        # BUG FIX: this previously tested the misspelled key 'broken_url',
        # so a user-supplied 'broker_url' was silently overwritten below.
        if 'celery' in overrides_map and 'broker_url' in overrides_map['celery']:
            # Means the user is hosting their own messaging broker.
            use_cluster_broker_airflow = False
    else:  # Implies the user does not want to use the celery executor
        use_cluster_broker_airflow = False
        use_celery_airflow = False

    if use_celery_airflow:
        if 'celery' not in overrides_map:
            overrides_map['celery'] = {}
        if use_cluster_broker_airflow:
            overrides_map['celery']['broker_url'] = default_broker_url  # Default broker config on machine
        if 'result_backend' not in overrides_map['celery']:
            # Reason for using sql alchemy for result backend: QBOL-5589
            sql_alchemy_conn = overrides_map['core']['sql_alchemy_conn']
            overrides_map['celery']['result_backend'] = 'db+' + sql_alchemy_conn
        if 'celeryd_concurrency' in overrides_map['celery']:
            # Migrate the deprecated option name to its modern spelling.
            overrides_map['celery']['worker_concurrency'] = overrides_map['celery']['celeryd_concurrency']
            del overrides_map['celery']['celeryd_concurrency']

    overrides_map['webserver']['rbac'] = False
    return (use_cluster_broker_airflow, use_celery_airflow, use_cluster_datastore)
def setup_scheduler_child_process_directory_and_cron(overrides_map):
    """Fill in scheduler log-directory and log-rotation defaults when absent."""
    scheduler = overrides_map.setdefault('scheduler', {})
    if 'child_process_log_directory' not in scheduler:
        log_root = os.getenv('AIRFLOW_LOG_DIR', '/media/ephemeral0/logs/airflow')
        scheduler['child_process_log_directory'] = '{0}/scheduler_task_logs'.format(log_root)
    if 'child_process_log_rotation_days' not in scheduler:
        scheduler['child_process_log_rotation_days'] = '2'
def setup_logs_symlink(final_config):
    """Symlink $AIRFLOW_HOME/logs to the configured base log folder.

    No-op when the configured folder already is $AIRFLOW_HOME/logs.

    :param final_config: merged airflow config as {section: {option: value}}.
    """
    logs_folder = final_config['logging']['base_log_folder']
    symlink_folder = "{0}/logs".format(final_config['core']['airflow_home'])
    if logs_folder != symlink_folder:
        symlink_command = ["ln", "-s", logs_folder, symlink_folder]
        process = subprocess.Popen(symlink_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        std, err = process.communicate()
        # BUG FIX: communicate() returns bytes, so the old check `err != ''`
        # was always true and an "error" was printed even on success.
        if err:
            print("An error occurred while creating symlink: {0}".format(err.decode(errors='replace')))
def main():
    """Merge user/recommended Airflow overrides into airflow.cfg and record
    which cluster services to start in the Airflow environment-variable file.
    """
    optparser = OptionParser()
    optparser.add_option("--airflow-overrides", default="", help="Airflow config overrides")
    optparser.add_option("--master-public-dns", default=None, help="Master Public DNS of the cluster")
    optparser.add_option("--airflow-home", help="Airflow Home")
    optparser.add_option("--airflow-env-var-file", help="Airflow Environment File Location")
    (options, args) = optparser.parse_args()
    if options.airflow_home is None:
        optparser.error('--airflow-home is mandatory')
    if options.airflow_env_var_file is None:
        optparser.error('--airflow-env-var-file is mandatory')
    # Overall aim is to merge the overrides by user/recommended with the ones present as default in airflow config.
    # Read config from Airflow Config file present at AIRFLOW_HOME
    config = ConfigParser.RawConfigParser()
    airflow_config_file_path = os.path.join(options.airflow_home , 'airflow.cfg')
    config.read(airflow_config_file_path)
    config_sections = config.sections()
    # Parse the overrides in the form section1.key1=value1!section2.key2=value2..
    # Store them in a map where key is section name and value is
    # a map with key value pairs of that section
    # (values arrive base64-encoded after a '#' separator; malformed entries are skipped)
    airflow_overrides = options.airflow_overrides
    overrides = airflow_overrides.split('!')
    overrides_map = {}
    for override in overrides:
        kv = override.split('.', 1)
        if len(kv) != 2:
            continue
        section = kv[0]
        prop_val = kv[1]
        kv = prop_val.split('#', 1)
        if len(kv) != 2:
            continue
        if not section in overrides_map:
            overrides_map[section] = {}
        overrides_map[section][kv[0]] = base64.b64decode(kv[1]).decode('utf-8')
    # Apply Qubole-specific defaults (datastore, webserver, celery broker).
    (use_cluster_broker_airflow, use_celery_airflow, use_cluster_datastore) = handle_specific_config(overrides_map, options)
    setup_scheduler_child_process_directory_and_cron(overrides_map)
    # Get all sections by combining sections in overrides and config file
    overrides_sections = list(overrides_map.keys())
    sections = set(config_sections + overrides_sections)
    final_config = {}
    # Now it's time to merge configurations of both airflow config file and overrides
    for section in sections:
        config_items = {}
        if config.has_section(section):
            # config.items(section) is of the form [(key1, value1), (key2, value2)..] and then converted to dict.
            config_items = dict(config.items(section))
        override_items = {}
        if section in overrides_map:
            override_items = overrides_map[section]
        # Merge the 2 maps
        # Priority overrides > default config
        final_section_config = dict(list(config_items.items()) + list(override_items.items()))
        final_config[section] = final_section_config
    # Finally we just reset the config object to have all sections with required options
    for section in final_config.keys():
        if not config.has_section(section):
            config.add_section(section)
        for option in final_config[section].keys():
            config.set(section, option, final_config[section][option])
    # Now dump the config again in the airflow config file
    with open(airflow_config_file_path, 'w') as airflow_config_file:
        config.write(airflow_config_file)
    airflow_env_var_file_path = options.airflow_env_var_file
    setup_logs_symlink(final_config)
    # Rewrite the env-var file: strip any stale USE_* exports, then append fresh ones.
    newFileData = ""
    for line in open(airflow_env_var_file_path, 'r'):
        if "export USE_CELERY_AIRFLOW=" in line or "export USE_CLUSTER_BROKER_AIRFLOW=" in line or "export USE_CLUSTER_DATASTORE=" in line:
            line = ""
        newFileData += line
    with open(airflow_env_var_file_path, 'w') as airflow_env_var_file:
        airflow_env_var_file.write(newFileData)
    with open(airflow_env_var_file_path, 'a') as airflow_env_var_file:
        airflow_env_var_file.write("export USE_CELERY_AIRFLOW=" + str(use_celery_airflow) + "\n")
        airflow_env_var_file.write("export USE_CLUSTER_BROKER_AIRFLOW=" + str(use_cluster_broker_airflow) + "\n")
        airflow_env_var_file.write("export USE_CLUSTER_DATASTORE=" + str(use_cluster_datastore) + "\n")
if __name__ == '__main__':
    try:
        sys.exit(main())
    except Exception:
        # Print the full traceback and exit non-zero so callers detect failure.
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
| [
"jami.harish@accolite.com"
] | jami.harish@accolite.com |
66c605b31758d9dc44858b8868ef503970bdaba6 | 4ea7855ef54e1a62df5d25aa30acfc564c676ab9 | /catalog/tests/test_models.py | dae7890e4f8af49e7b195ea6714413e628481aa5 | [
"CC0-1.0"
] | permissive | Novel-Public-Health/Novel-Public-Health | 00282b9d3801494f7fc25f5f44c33070fa119bef | fbb7dd0da64ae4fc9641097ca8056152129bd83b | refs/heads/main | 2023-04-29T19:27:51.440301 | 2021-05-07T03:42:10 | 2021-05-07T03:42:10 | 337,804,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | from django.test import TestCase
# Create your tests here.
from catalog.models import Director, Genre, Language, Movie, Profile
class DirectorModelTest(TestCase):
    """Verifies Director field verbose-name labels and the detail-page URL."""

    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods.
        Director.objects.create(name='Big Bob')

    def _field_label(self, field_name):
        # Helper: verbose_name of a Director field, read from the fixture row.
        return Director.objects.get(id=1)._meta.get_field(field_name).verbose_name

    def test_name_label(self):
        self.assertEquals(self._field_label('name'), 'name')

    def test_date_of_birth_label(self):
        self.assertEquals(self._field_label('date_of_birth'), 'date of birth')

    def test_date_of_death_label(self):
        self.assertEquals(self._field_label('date_of_death'), 'died')

    def test_get_absolute_url(self):
        # This will also fail if the urlconf is not defined.
        self.assertEquals(Director.objects.get(id=1).get_absolute_url(), '/directors/1')
class GenreModelTest(TestCase):
    """Checks the Genre model's 'name' label and the stored genre names."""

    @classmethod
    def setUpTestData(cls):
        # Set up most popular genres.
        cls.genres = ['Action', 'Fantasy', 'Comedy', 'Romance', 'Documentary']
        for name in cls.genres:
            Genre.objects.create(name=name)

    def test_name_label(self):
        for pk, expected in enumerate(self.genres, start=1):
            genre = Genre.objects.get(id=pk)
            self.assertEquals(genre._meta.get_field('name').verbose_name, 'name')
            # test the name of the genre
            self.assertEquals(genre.name, expected)
class LanguageModelTest(TestCase):
    """Checks the Language model's 'name' label and the stored language names."""

    @classmethod
    def setUpTestData(cls):
        # Set up most popular languages.
        cls.languages = ['Mandarin Chinese', 'Spanish', 'English', 'Hindi']
        for name in cls.languages:
            Language.objects.create(name=name)

    def test_name_label(self):
        for pk, expected in enumerate(self.languages, start=1):
            language = Language.objects.get(id=pk)
            self.assertEquals(language._meta.get_field('name').verbose_name, 'name')
            # test the name of the language
            self.assertEquals(language.name, expected)
import re
class MovieModelTest(TestCase):
    """Tests Movie construction with and without an IMDB link.

    NOTE(review): test_imdb_movie calls Movie.get_imdb_stats(), which appears
    to query IMDB for the linked title — so that test needs network access
    and may be flaky; confirm against the model implementation.
    """

    @classmethod
    def setUpTestData(cls):
        # One movie resolved via its IMDB link, one fully specified locally.
        cls.imdb_movie = Movie.objects.create(
            title='Concussion',
            imdb_link='https://www.imdb.com/title/tt3322364/'
        )
        cls.non_imdb_movie = Movie.objects.create(
            title='Public Health YOU Should Know',
            director=Director.objects.create(name='Big Bob'),
            language=Language.objects.create(name='English'),
            summary="This movie is the greatest thing to ever see. \
            Forreal.",
            genre=Genre.objects.create(name='Documentary'),
            year='2021'
        )

    def test_non_imdb_movie(self):
        self.assertEquals(self.non_imdb_movie.director.name, 'Big Bob')
        self.assertEquals(self.non_imdb_movie.language.name, 'English')
        self.assertEquals(self.non_imdb_movie.genre.name, 'Documentary')

    def test_imdb_movie(self):
        # get_imdb_stats() returns (year, director-dict, genre-string) per the
        # assertions below.
        imdb_stats = self.imdb_movie.get_imdb_stats()
        self.assertEquals(imdb_stats[0], 2015)
        self.assertEquals(imdb_stats[1]['name'], 'Peter Landesman')
        self.assertTrue(re.match(r'(Biography|Drama|Sport)', imdb_stats[2]))
"haleau@live.unc.edu"
] | haleau@live.unc.edu |
8f1d3b33b6976f1a8d134f8df6ee8b3eb067f5b5 | 2ecc0f925df105282b5c725d5441f73ff7bf8317 | /py31파일처리/py31_17_encrypt.py | 5bf5c6f4f34a9da7baec6fa9c9b436b956584c32 | [] | no_license | anhduong2020/anhduong3202 | 8ed8e7cd7782fb7eec507d4abbbac7f64576a65b | e06e27d8dd56130fc66271f1013de61ceea89335 | refs/heads/master | 2021-01-13T20:09:02.994695 | 2020-03-15T09:08:22 | 2020-03-15T09:08:22 | 242,481,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | key = "abcdefghijklmnopqrstuvwxyz"
# Takes a plaintext, encrypts it, and returns the ciphertext.
# Takes a ciphertext, decrypts it, and returns the plaintext.
| [
"hdfj@naver.com"
] | hdfj@naver.com |
0bc44e39ed3c0411a6484900df8dc4ccda28fa3a | 67b0379a12a60e9f26232b81047de3470c4a9ff9 | /profile/migrations/0042_auto_20170225_1639.py | 6f002bfd9f51f8ca97ff8153953db520d0afe6e9 | [] | no_license | vintkor/whitemandarin | 8ea9022b889fac718e0858873a07c586cf8da729 | 5afcfc5eef1bb1cc2febf519b04a4819a7b9648f | refs/heads/master | 2021-05-06T03:35:09.367375 | 2017-12-20T15:43:08 | 2017-12-20T15:43:08 | 114,904,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-25 14:39
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration: alters User.date_of_birth on the profile app.

    dependencies = [
        ('profile', '0041_auto_20170217_1405'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='date_of_birth',
            # NOTE(review): the default is a timestamp frozen at generation
            # time (2017-02-25), not "now" at runtime — typical of makemigrations
            # output when a model default called datetime.now().
            field=models.DateField(default=datetime.datetime(2017, 2, 25, 14, 39, 18, 342403, tzinfo=utc)),
        ),
    ]
| [
"alkv84@yandex.ru"
] | alkv84@yandex.ru |
c1f3f5ba64a7e7a7306a3bb2c2820a4dbb6a892e | 5a7b38ee398e4f63a26b2ec2f6fa1efbce025264 | /api/src/dao/commentDao.py | 64c4f0711be9c2461c689cf2af8510d641966f97 | [] | no_license | AJarombek/saints-xctf-api | f1e361bfc762dcc197cbc78b2b41f7cff18919b5 | c2812089ec0351fd72ef7b1581a48bc55d65fd0e | refs/heads/main | 2023-02-09T11:50:04.698394 | 2023-01-29T18:52:33 | 2023-01-29T18:52:33 | 190,956,043 | 5 | 2 | null | 2023-01-22T23:04:10 | 2019-06-09T02:35:46 | Python | UTF-8 | Python | false | false | 6,225 | py | """
Comment data access from the SaintsXCTF MySQL database. Contains comments posted on exercise logs.
Author: Andrew Jarombek
Date: 7/3/2019
"""
from datetime import datetime
from sqlalchemy import desc
from database import db
from dao.basicDao import BasicDao
from model.Comment import Comment
class CommentDao:
    """Data-access object for exercise-log comments.

    Read methods go through the SQLAlchemy ORM and always filter out rows
    with deleted=TRUE; write methods use raw SQL via db.session.execute and
    commit through BasicDao.safe_commit(), which returns True on success.
    """

    @staticmethod
    def get_comments() -> list:
        """
        Retrieve all the comments in the database.
        :return: The result of the query.
        """
        return (
            Comment.query.filter(Comment.deleted.is_(False))
            .order_by(Comment.time)
            .all()
        )

    @staticmethod
    def get_comment_by_id(comment_id: int) -> Comment:
        """
        Retrieve a single comment by its unique id.
        :param comment_id: The unique identifier for a comment.
        :return: The result of the query.
        """
        return (
            Comment.query.filter_by(comment_id=comment_id)
            .filter(Comment.deleted.is_(False))
            .first()
        )

    @staticmethod
    def get_comments_by_log_id(log_id: int) -> list:
        """
        Retrieve all the comments on a specific exercise log, newest first.
        :param log_id: Unique identifier for an exercise log.
        :return: The result of the query.
        """
        return (
            Comment.query.filter_by(log_id=log_id)
            .filter(Comment.deleted.is_(False))
            .order_by(desc(Comment.time))
            .all()
        )

    @staticmethod
    def add_comment(new_comment: Comment) -> bool:
        """
        Add a comment for an exercise log to the database.
        :param new_comment: Object representing a comment for an exercise log.
        :return: True if the comment is inserted into the database, False otherwise.
        """
        # pylint: disable=no-member
        db.session.add(new_comment)
        return BasicDao.safe_commit()

    @staticmethod
    def update_comment(comment: Comment) -> bool:
        """
        Update a comment in the database. Certain fields (log_id, username, first, last) can't be modified.
        :param comment: Object representing an updated comment.
        :return: True if the comment is updated in the database, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
                time=:time,
                content=:content,
                modified_date=:modified_date,
                modified_app=:modified_app
            WHERE comment_id=:comment_id
            AND deleted IS FALSE
            """,
            {
                "comment_id": comment.comment_id,
                "time": comment.time,
                "content": comment.content,
                "modified_date": comment.modified_date,
                "modified_app": comment.modified_app,
            },
        )
        return BasicDao.safe_commit()

    @staticmethod
    def delete_comment_by_id(comment_id: int) -> bool:
        """
        Delete a comment from the database based on its id.
        :param comment_id: ID which uniquely identifies the comment.
        :return: True if the deletion was successful without error, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            "DELETE FROM comments WHERE comment_id=:comment_id AND deleted IS FALSE",
            {"comment_id": comment_id},
        )
        return BasicDao.safe_commit()

    @staticmethod
    def delete_comments_by_log_id(log_id: int) -> bool:
        """
        Delete comments from the database based on the log they are bound to.
        :param log_id: ID which uniquely identifies the log.
        :return: True if the deletions were successful without error, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            "DELETE FROM comments WHERE log_id=:log_id AND deleted IS FALSE",
            {"log_id": log_id},
        )
        return BasicDao.safe_commit()

    @staticmethod
    def soft_delete_comment(comment: Comment) -> bool:
        """
        Soft Delete a comment from the database (flag it deleted, keep the row).
        :param comment: Object representing a comment to soft delete.
        :return: True if the soft deletion was successful without error, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
                deleted=:deleted,
                modified_date=:modified_date,
                modified_app=:modified_app,
                deleted_date=:deleted_date,
                deleted_app=:deleted_app
            WHERE comment_id=:comment_id
            AND deleted IS FALSE
            """,
            {
                "comment_id": comment.comment_id,
                "deleted": comment.deleted,
                "modified_date": comment.modified_date,
                "modified_app": comment.modified_app,
                "deleted_date": comment.deleted_date,
                "deleted_app": comment.deleted_app,
            },
        )
        return BasicDao.safe_commit()

    @staticmethod
    def soft_delete_comments_by_log_id(log_id: int) -> bool:
        """
        Soft Delete comments associated with an exercise log from the database.
        :param log_id: Unique identifier for an exercise log.
        :return: True if the soft deletion was successful without error, False otherwise.
        """
        # pylint: disable=no-member
        db.session.execute(
            """
            UPDATE comments SET
                deleted=:deleted,
                modified_date=:modified_date,
                modified_app=:modified_app,
                deleted_date=:deleted_date,
                deleted_app=:deleted_app
            WHERE log_id=:log_id
            AND deleted IS FALSE
            """,
            {
                "log_id": log_id,
                "deleted": True,
                "modified_date": datetime.now(),
                "modified_app": "saints-xctf-api",
                "deleted_date": datetime.now(),
                "deleted_app": "saints-xctf-api",
            },
        )
        return BasicDao.safe_commit()
| [
"ajarombek95@gmail.com"
] | ajarombek95@gmail.com |
68bda07db08e3d6b58a8cbb0bf86ce63b584f900 | 5a1f77b71892745656ec9a47e58a078a49eb787f | /4_Backwoods_Forest/140-A_Fine_Mint/fine_mint.py | f17553bc8c35e99e05fe9b3bbd9916adfeaa85f8 | [
"MIT"
def pickUpCoin():
    # Walk onto the nearest visible item, if any.
    nearest = hero.findNearestItem()
    if nearest:
        hero.moveXY(nearest.pos.x, nearest.pos.y)
def attackEnemy():
    # Cleave when the ability is off cooldown; otherwise do a normal attack.
    target = hero.findNearestEnemy()
    if not target:
        return
    if hero.isReady("cleave"):
        hero.cleave(target)
    else:
        hero.attack(target)
# Main combat loop: fight the nearest enemy first, then grab the nearest coin.
while True:
    attackEnemy()
    pickUpCoin()
| [
"katik.hello@gmail.com"
] | katik.hello@gmail.com |
ab90b58745ab3cc097586354bf150248a81ee6f9 | 6938ecea830f45abbef0218fa33be5ab21986eca | /ucpe/bcm_controller/bcm_controller.py | af406abae3955b4333e7333c1c19c771b0721204 | [] | no_license | AnEscapist/sdn-orchestrator | 16ea20d8cdcd817990bd34da071b03451a2cf3dd | f242bb2ab42864cde9f0bafb1a92acbb33173b24 | refs/heads/master | 2020-07-01T09:22:29.835594 | 2019-08-11T19:46:44 | 2019-08-11T19:46:44 | 201,121,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,367 | py |
from inspect import signature, Parameter
from ucpe.bcm_controller.utils import get_caller_function_name
import ucpe.bcm_controller.grpc.autobcm_pb2 as autobcm_pb2
import ucpe.bcm_controller.grpc.autobcm_pb2_grpc as autobcm_pb2_grpc
import grpc
# Address of the AutoBCM gRPC server (switch controller) used by all helpers below.
hostname = "10.10.81.250:50051"
class BCMController:
    """RPC facade for Broadcom switch operations.

    Each static method forwards the request body found in **kwargs to the
    matching module-level gRPC helper via _call_function, which maps
    body fields onto the helper's parameters and wraps the result.
    """

    @staticmethod
    def bcm_controller_show_active_ports(**kwargs):
        func = show_active_ports
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_create_vlan(**kwargs):
        func = create_vlan
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_destroy_vlan(**kwargs):
        func = destroy_vlan
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_show_vlans(**kwargs):
        func = show_vlans
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_add_ports(**kwargs):
        func = add_ports
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_rem_ports(**kwargs):
        func = rem_ports
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_set_pvlan(**kwargs):
        func = set_pvlan
        return _call_function(func, **kwargs)

    @staticmethod
    def bcm_controller_show_pvlans(**kwargs):
        func = show_pvlans
        return _call_function(func, **kwargs)
def show_active_ports():
    """Return the switch's active-port listing from the AutoBCM gRPC service."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    reply = stub.ShowActivePorts(autobcm_pb2.ConfigRequest())
    return reply.message
def create_vlan(vlanid, pbm='', ubm=''):
    """Create a VLAN; when a port bitmap is given, also add those ports.

    Returns the service messages joined with a newline.
    """
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm, ubm=ubm)
    messages = [stub.CreateVLAN(request).message]
    if pbm != '':
        messages.append(stub.AddPorts(request).message)
    return '\n'.join(messages)
def destroy_vlan(vlanid):
    """Destroy the VLAN identified by *vlanid* on the switch."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.DestroyVLAN(autobcm_pb2.ConfigRequest(vlanid=vlanid)).message
def show_vlans():
    """Return the switch's VLAN listing from the AutoBCM gRPC service."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.ShowVLANs(autobcm_pb2.ConfigRequest()).message
def add_ports(vlanid, pbm, ubm=''):
    """Add the ports in bitmap *pbm* (optionally untagged *ubm*) to a VLAN."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm, ubm=ubm)
    return stub.AddPorts(request).message
def rem_ports(vlanid, pbm):
    """Remove the ports in bitmap *pbm* from the given VLAN."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm)
    return stub.RemovePorts(request).message
def set_pvlan(vlanid, pbm):
    """Set the port-VLAN (PVLAN) for the ports in bitmap *pbm*."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    request = autobcm_pb2.ConfigRequest(vlanid=vlanid, pbm=pbm)
    return stub.SetPVLAN(request).message
def show_pvlans():
    """Return the switch's PVLAN listing from the AutoBCM gRPC service."""
    stub = autobcm_pb2_grpc.AutoBCMStub(grpc.insecure_channel(hostname))
    return stub.ShowPVLANs(autobcm_pb2.ConfigRequest()).message
def _call_function(func, **kwargs):
    """Invoke *func* with arguments pulled from kwargs['body'].

    Parameters without a default must be present in the body (KeyError
    otherwise); optional parameters fall back to their declared defaults.
    Returns {'result': <func return>, 'function': <caller name>}.
    """
    body = kwargs["body"]  # todo: bad
    call_args = {}
    for name, spec in signature(func).parameters.items():
        if spec.default is Parameter.empty:
            if name not in body:
                raise KeyError("missing argument " + name + " in call to " + func.__name__)
            call_args[name] = body[name]
        else:  # todo: this is REALLY bad - depends on the arg name, but so does the request/response
            call_args[name] = body.get(name, spec.default)
    return {
        "result": func(**call_args),
        "function": get_caller_function_name(),
    }
| [
"azhang307@gatech.edu"
] | azhang307@gatech.edu |
203c4c5c65469b178d194de6b85feec2a5037e9a | 129941a1fb7c0bbd9969f0dd8843b057ce9f3666 | /VAJets/PKUTreeMaker/test/Wcrab/crab3_analysismu.py | 09dc3efeef0cc17499456da57454ef8dcc335da1 | [] | no_license | PKUHEPEWK/VBS_WGamma | 7cf43f136dd92777ab7a8a742c163e222b1f4dbf | 0f94abb2d4303b1c08d62971a74f25b100cbe042 | refs/heads/master | 2020-03-25T04:36:21.119377 | 2019-07-15T02:56:32 | 2019-07-15T02:56:32 | 143,404,007 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: process the SingleMuon Run2016B dataset with analysis_data.py.
config = Configuration()
config.section_("General")
config.General.requestName = 'SMu16B-v1'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Jet-energy-correction text files shipped to worker nodes (23Sep2016 BCD V4, AK4 CHS + PUPPI).
config.JobType.inputFiles =['Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFchs.txt','Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFPuppi.txt','Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFPuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'analysis_data.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.inputDataset = '/SingleMuon/Run2016B-03Feb2017_ver2-v2/MINIAOD'
config.Data.inputDBS = 'global'
# Split jobs by luminosity sections, 40 lumis per job, within the certified-run JSON.
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 40
config.Data.lumiMask = 'Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
#config.Data.runRange = '246908-258750'
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'SMu16B-v1'
config.section_("Site")
config.Site.storageSite = 'T3_US_FNALLPC' #T2_CN_Beijing'
| [
"jiexiao@pku.edu.cn"
] | jiexiao@pku.edu.cn |
57154970bf889cb5ae8637e4d25670f1909939ea | 1213a0cc401365556d488913693cde54281ef350 | /backup_fully_working_with_sensor/ai.py | 5f272f3508088f2185990cc42d95d0cc93923477 | [] | no_license | mkhetan/session10_endgame_exp | 188adf0a8dae15f7603cc40fdc3b2dc9de1d0424 | e6cd6066697c95825708f264f77a123622e10d5b | refs/heads/master | 2022-07-15T22:04:31.604956 | 2020-05-12T05:19:33 | 2020-05-12T05:19:33 | 261,933,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,067 | py | # AI for Self Driving Car
# Importing the libraries
import numpy as np
import random
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
# Creating the architecture of the Neural Network
class Actor(nn.Module):
    """Deterministic policy network: state -> action in [-max_action, max_action]."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two hidden layers of 400 and 300 units (TD3 paper architecture).
        self.layer_1 = nn.Linear(state_dim, 400)
        self.layer_2 = nn.Linear(400, 300)
        self.layer_3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, x):
        hidden = F.relu(self.layer_1(x))
        hidden = F.relu(self.layer_2(hidden))
        # tanh squashes to [-1, 1]; scale up to the environment's action bound.
        return self.max_action * torch.tanh(self.layer_3(hidden))
class Critic(nn.Module):
    """Twin Q-networks: both map (state, action) to a scalar Q-value."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # First critic network.
        self.layer_1 = nn.Linear(state_dim + action_dim, 400)
        self.layer_2 = nn.Linear(400, 300)
        self.layer_3 = nn.Linear(300, 1)
        # Second critic network.
        self.layer_4 = nn.Linear(state_dim + action_dim, 400)
        self.layer_5 = nn.Linear(400, 300)
        self.layer_6 = nn.Linear(300, 1)

    def forward(self, x, u):
        state_action = torch.cat([x, u], 1)
        q1 = self.layer_3(F.relu(self.layer_2(F.relu(self.layer_1(state_action)))))
        q2 = self.layer_6(F.relu(self.layer_5(F.relu(self.layer_4(state_action)))))
        return q1, q2

    def Q1(self, x, u):
        """Q-value from the first critic only (used for the actor update)."""
        state_action = torch.cat([x, u], 1)
        return self.layer_3(F.relu(self.layer_2(F.relu(self.layer_1(state_action)))))
# Implementing Experience Replay
class ReplayBuffer(object):
    """Fixed-capacity ring buffer of (state, next_state, action, reward, done) transitions."""

    def __init__(self, max_size=1e6):
        self.storage = []
        # Keep the capacity as an int so the index arithmetic in add() stays
        # integral (the historical default 1e6 is a float).
        self.max_size = int(max_size)
        self.ptr = 0

    def add(self, transition):
        """Append a transition, overwriting the oldest entry once the buffer is full."""
        if len(self.storage) >= self.max_size:
            self.storage[self.ptr] = transition
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(transition)

    def sample(self, batch_size):
        """Sample *batch_size* transitions uniformly at random (with replacement).

        :return: five numpy arrays (states, next_states, actions,
                 rewards (N, 1), dones (N, 1)).
        """
        ind = np.random.randint(0, len(self.storage), size=batch_size)
        batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
        for i in ind:
            state, next_state, action, reward, done = self.storage[i]
            # np.asarray avoids a copy when possible; np.array(..., copy=False)
            # raises under NumPy 2 when a copy is actually required.
            batch_states.append(np.asarray(state))
            batch_next_states.append(np.asarray(next_state))
            batch_actions.append(np.asarray(action))
            batch_rewards.append(np.asarray(reward))
            batch_dones.append(np.asarray(done))
        return np.array(batch_states), np.array(batch_next_states), np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1)
# Select the compute device once at import time: CUDA when available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TD3(object):
    def __init__(self, state_dim, action_dim, max_action):
        """Build actor/critic networks and their target copies on the global device.

        :param state_dim: dimensionality of the observation vector.
        :param action_dim: dimensionality of the action vector.
        :param max_action: absolute bound on each action component.
        """
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        # Targets start as exact copies; they are soft-updated during training.
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
        self.max_action = max_action
    def select_action(self, state):
        """Return the deterministic policy action for one state.

        `state` must support .reshape (presumably a 1-D numpy array —
        confirm against callers); the result is a flat numpy array.
        """
        state = torch.Tensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
for it in range(iterations):
# Step 4: We sample a batch of transitions (s, s’, a, r) from the memory
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size)
state = torch.Tensor(batch_states).to(device)
next_state = torch.Tensor(batch_next_states).to(device)
action = torch.Tensor(batch_actions).to(device)
reward = torch.Tensor(batch_rewards).to(device)
done = torch.Tensor(batch_dones).to(device)
# Step 5: From the next state s’, the Actor target plays the next action a’
next_action = self.actor_target(next_state)
# Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment
noise = torch.Tensor(batch_actions).data.normal_(0, policy_noise).to(device)
noise = noise.clamp(-noise_clip, noise_clip)
next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
# Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
# Step 8: We keep the minimum of these two Q-values: min(Qt1, Qt2)
target_Q = torch.min(target_Q1, target_Q2)
# Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor
target_Q = reward + ((1 - done) * discount * target_Q).detach()
# Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs
current_Q1, current_Q2 = self.critic(state, action)
# Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Step 12: We backpropagate this Critic loss and update the parameters of the two Critic models with a SGD optimizer
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model
if it % policy_freq == 0:
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Step 14: Still once every two iterations, we update the weights of the Actor target by polyak averaging
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
# Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
# Making a save method to save a trained model
def save(self, filename, directory):
torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
# Making a load method to load a pre-trained model
def load(self, filename, directory):
self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))
self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))
| [
"mkhetan@extremenetworks.com"
] | mkhetan@extremenetworks.com |
62a61d7f251b2dd796c2a0864e338c6272236b1a | 87828431072e3c60a92dc274b078d7cf1e5705be | /back_python/account/migrations/0001_initial.py | 34d3acacd2cf509d472797922ba4727ed9535d39 | [] | no_license | cash2one/habit | 90adfd80427a0c0d04104ea5cf8123cf025b2d8b | 3782e498e1e40d6b638aaf2c7c1ac087c0739a36 | refs/heads/master | 2021-01-19T12:32:51.627847 | 2017-04-11T15:41:28 | 2017-04-11T15:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 08:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `account` app.

    Creates the `Account` model: one ledger row per activity, with an
    integer column per trade category plus created/updated timestamps
    and a foreign key to `activity.Activity`.
    """

    initial = True

    dependencies = [
        ('activity', '0013_auto_20170125_1649'),
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tradeDate', models.DateField(auto_now=True, verbose_name='时间')),
                ('tradeType', models.CharField(choices=[('fee', '套餐服务费'), ('deposit', '押金'), ('milyInput', '套餐囤米'), ('milyInputByDeposit', '押金囤米'), ('milyOutput', '米粒打赏'), ('milyOutputByDonate', '米粒捐赠'), ('feedBack', '打卡奖励米粒'), ('feedBackReturnDeposit', '打卡返还押金'), ('aveDeposit', '平均分配懒人押金')], max_length=50, verbose_name='类型')),
                ('fee', models.IntegerField(default=0, verbose_name='套餐服务费')),
                ('deposit', models.IntegerField(default=0, verbose_name='囤米押金')),
                ('milyInput', models.IntegerField(default=0, verbose_name='套餐囤米')),
                ('milyInputByDeposit', models.IntegerField(default=0, verbose_name='押金囤米')),
                ('milyOutput', models.IntegerField(default=0, verbose_name='米粒打赏')),
                ('milyOutputByDonate', models.IntegerField(default=0, verbose_name='米粒捐赠')),
                ('feedBack', models.IntegerField(default=0, verbose_name='打卡奖励米粒')),
                ('feedBackReturnDeposit', models.IntegerField(default=0, verbose_name='打卡奖励押金')),
                ('aveDeposit', models.IntegerField(default=0, verbose_name='平均分配懒人押金')),
                ('createdTime', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('updatedTime', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='activity.Activity', verbose_name='活动')),
            ],
        ),
    ]
| [
"jiangyong@qq.com"
] | jiangyong@qq.com |
72e87ff5fac87b45a4fbe10d20bbd6dc95907e38 | 242ebcb7220c2e16c141a6bea4a09c7cb5e4287d | /accounts/forms.py | 83f3c4a31f7b0a3a43e78a73a2980318f2d55c71 | [] | no_license | olivx/estudos_crud | 06ed8c269a4c36db3579daf6d6aef5e7d49dc5f9 | 24af031ed44a7c6cf567368556d368fe58ab1090 | refs/heads/master | 2021-01-11T09:28:49.355388 | 2017-03-03T15:17:25 | 2017-03-03T15:17:25 | 81,199,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | from django import forms
from django.contrib.auth import authenticate
from accounts.models import User
from django.utils.translation import ugettext_lazy as _
class RegisterForm(forms.ModelForm):
    """User registration form with password confirmation.

    Exposes two password fields, validates that they match, and hashes
    the chosen password onto the newly created user.
    """

    password1 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)
    password2 = forms.CharField(max_length=30, widget=forms.PasswordInput, required=True)

    def clean_password2(self):
        """Raise ValidationError unless both password fields match."""
        password1 = self.cleaned_data['password1']
        password2 = self.cleaned_data['password2']
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Create the user with a properly hashed password; save when `commit`."""
        user = super(RegisterForm, self).save(commit=False)
        # BUG FIX: the original assigned the username into user.email and then
        # immediately overwrote it with the email; assign each field correctly.
        user.username = self.cleaned_data['username']
        user.email = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
class AuthenticanUserForm(forms.Form):
    """Email/password login form.

    Authenticates via the email backend and rejects inactive users and
    users whose email address has not been confirmed yet.
    """

    email = forms.EmailField(label='Email', max_length=30, required=True)
    password = forms.CharField(label='Password', max_length=30, required=True, widget=forms.PasswordInput)

    error_messages = {
        'invalid_login': _(
            "Please enter a correct %(email)s and password. Note that both "
            "fields may be case-sensitive."
        ),
        'inactive': _("This account is inactive."),
        'email_confirmation': _(
            # BUG FIX: repaired the garbled message text ("the your eamil").
            'this email is not confirmed yet, please confirm your email and try again'
        ),
    }

    def clean(self):
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if email and password:
            self.user = authenticate(email=email, password=password)
            if self.user is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'email': 'Email'},
                )
            # BUG FIX: confirm_login_allowed was defined below but never
            # invoked, so the 'inactive' and 'email_confirmation' checks
            # were dead code and such users could log in.
            self.confirm_login_allowed(self.user)
        return self.cleaned_data

    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. This default behavior is to
        allow login by active users, and reject login by inactive users.
        If the given user cannot log in, this method should raise a
        ``forms.ValidationError``.
        If the given user may log in, this method should return None.
        """
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
        if not user.profile.email_confirmation:
            raise forms.ValidationError(
                self.error_messages['email_confirmation'],
                code='email_confirmation'
            )

    class Meta:
        fields = ('email', 'password')
| [
"oliveiravicente.net@gmail.com"
] | oliveiravicente.net@gmail.com |
ddeca087ba585f1d6f5c7bf63b5f45edb6aef713 | 0fee9fa700f769b8fbdbe0549d4b518a4a84b66e | /node/image_predict/image_predict2.py | b3bd45bb143b4f919421a68b3a6a414f33851551 | [] | no_license | KoGaYoung/2019_Capstone-design | c885b7fd31abf92cad303e26746eb310d5093a65 | 00e9815600157bb2f452a09f5e0c300a73deaf0b | refs/heads/master | 2020-09-15T17:45:41.785488 | 2020-04-27T14:46:46 | 2020-04-27T14:46:46 | 223,519,294 | 2 | 3 | null | 2019-11-23T02:31:13 | 2019-11-23T02:31:12 | null | UTF-8 | Python | false | false | 2,128 | py | import matplotlib
import numpy as np
import os
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
import operator
from keras.models import load_model
from keras.preprocessing import image
import base64
import ast
import sys
def recommand(predictions, class_dict):
    """Return the top-10 predicted class names as a comma-separated string.

    :param predictions: 2-D array-like of class probabilities (e.g. the output
        of model.predict_generator); only the first row is used.
    :param class_dict: mapping of class name -> class index.
    :return: comma-joined names of the (up to) 10 highest-scoring classes,
        best first.
    """
    scores = list(predictions[0])
    # Invert the name->index mapping once instead of scanning it per rank
    # (the original did a full dict scan and fragile str(list) slicing).
    # NOTE: if several names map to the same index, the last one wins.
    index_to_name = {index: name for name, index in class_dict.items()}
    # Stable descending sort: ties keep ascending index order, exactly as
    # the original sorted()-based ranking did.
    ranked = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    # Guard against models with fewer than 10 classes (the original indexed
    # blindly and raised IndexError in that case).
    return ",".join(index_to_name[i] for i in ranked[:10])
# base64.txt -> image -> save folder
# argv[1] carries a data-URL ("data:image/png;base64,...."); strip the prefix
# and decode the remainder into a PNG on disk for the generator to pick up.
remove_str = 'data:image/png;base64,'
image_path = '/home/student/2019_Capstone-design/node/image_predict/predict_image/12/out.png'
g = open(image_path, 'wb')
g.write(base64.b64decode(sys.argv[1][len(remove_str):]))
g.close()
# Load the trained Keras model and compile it before running predictions.
model1 = load_model('/home/student/2019_Capstone-design/node/image_predict/v2.01.h5')
model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Print test prediction
# SECURITY NOTE(review): eval() executes arbitrary code from the file; the
# file is expected to hold a dict literal mapping label -> index — verify.
f2 = open('/home/student/2019_Capstone-design/node/image_predict/label_dict.txt')
label_dict = eval(f2.read())
f2.close()
batchsize = 64
image_size = (255, 255)
# Feed every image found under predict_image/ through the model.
pred_gen = ImageDataGenerator().flow_from_directory(
    '/home/student/2019_Capstone-design/node/image_predict/predict_image/',
    class_mode='categorical',
    batch_size=batchsize,
    target_size=image_size
)
predictions = model1.predict_generator(pred_gen)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
import operator  # NOTE(review): duplicate of the top-of-file import
index, value = max(enumerate(predictions[0]), key=operator.itemgetter(1))
pred_result = [name for name, target in label_dict.items() if target == index]
#recommand_top3
# Emit the top-10 recommendation string on stdout for the Node caller.
re_list = recommand(predictions, label_dict)
print(re_list)
| [
"4723515@naver.com"
] | 4723515@naver.com |
fd8b43d4911e377665daaa891b42fdc5fbbe6787 | bff728cca48292af65e6c14ca942ba92f0161f0f | /wiki_search.py | 9354460dac59f44ecdc1dba005eb13e21b9eff58 | [] | no_license | enriched-uranium235/chatbot | 1520a68d2084c4b4b61136ce9da167786a5878d8 | e95f6dde49dc684b93b4f95ad8da085b95840dd3 | refs/heads/master | 2023-02-20T05:18:25.807488 | 2021-01-06T07:43:52 | 2021-01-06T07:43:52 | 327,227,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import wikipedia
def wikipediaSearch(search_text):
    """Look up `search_text` on Japanese Wikipedia.

    :param search_text: term to search for
    :return: first sentence of the best-matching article plus its URL, or an
        explanatory message when nothing is found or the lookup fails.
    """
    response_string = ""
    wikipedia.set_lang("ja")
    search_response = wikipedia.search(search_text)
    if not search_response:
        response_string = "その単語は登録されていません。"
        return response_string
    try:
        wiki_page = wikipedia.page(search_response[0])
    except Exception as e:
        # BUG FIX: exceptions have no `.message` attribute in Python 3, so the
        # original handler itself raised AttributeError; report the exception
        # type and text instead.
        response_string = "エラーが発生しました。\n{}\n{}".format(type(e).__name__, e)
        return response_string
    wiki_content = wiki_page.content
    # Keep everything up to (and including) the first Japanese full stop.
    response_string += wiki_content[0:wiki_content.find("。")] + "。\n"
    response_string += "リンクはこちら:" + wiki_page.url
    return response_string
if __name__ == "__main__":
    # Simple interactive loop: an empty input terminates the program.
    while True:
        user_input = input("検索したい単語を入力してください。:")
        if not user_input:
            break
        print(wikipediaSearch(user_input))
"hexanitrobenzene@gmail.com"
] | hexanitrobenzene@gmail.com |
a7b174b85eba3c6f121e88eb9985de14f93428b9 | 14ac991bba2eb7d59a1d76db792b7689316f8060 | /leetcode/00179.py | 2097fd3046480dd7c91a1af857c955626b82b82d | [] | no_license | munagekar/cp | bde88fa565a7e2158ebe0f2611c4718a3d2970f1 | c25d29f68943e3721233e177abe13068e5f40e4b | refs/heads/master | 2021-07-04T05:00:02.511874 | 2021-05-30T14:30:05 | 2021-05-30T14:30:05 | 240,286,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from itertools import zip_longest
from functools import cmp_to_key
def cmp(a, b):
    """Comparator for digit strings: `a` ranks higher when a+b beats b+a.

    Returns 1 when concatenating a before b yields the larger string,
    otherwise -1 (including the equal case, matching the original).
    """
    return 1 if a + b > b + a else -1
class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        """Arrange `nums` so their concatenation forms the largest number."""
        digits = [str(n) for n in nums]
        digits.sort(key=cmp_to_key(cmp), reverse=True)
        joined = "".join(digits)
        # All-zero input collapses to a single "0" instead of "000...".
        return joined.lstrip("0") or "0"
"avm.abhishek@gmail.com"
] | avm.abhishek@gmail.com |
674c893b5c3d74c716ca8d94e1128a7f82ceea42 | fbf22c2f482f47d45f1ddd024cc2a12236c25dbe | /transfer/lib/gitCom.py | 146f8682c71806418ebca2a3ee96da78ba44e153 | [] | no_license | LofOWL/Jupyter-code-tracker | 3068e7e71ad9ecc43e8118257434b00fdb0cb9d9 | 2cdf55dc3d7f7f697ae57fe7661b23150b1a5edf | refs/heads/master | 2022-02-18T05:25:51.427151 | 2022-02-13T22:03:38 | 2022-02-13T22:03:38 | 234,213,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import os
import subprocess
class git:
def __init__(self,path):
print(path)
os.chdir(path)
self.path = path
self.ipynb_commit = "git log --pretty=%H --follow *.ipynb"
self.commit_name_status = ""
self.commit_parent = ""
def set_commit_name_status(self,commit):
self.commit_name_status = "git show "+str(commit)+" --name-status --pretty=\"\" "
def set_commit_parent(self,commit):
self.commit_parent = "git show "+str(commit)+" --name-status --pretty=\"raw\" "
def run(self,commond):
os.chdir(self.path)
diff = subprocess.check_output(commond,shell=True)
alist = diff.decode("utf-8").split("\n")
return alist[:-1]
| [
"jeffersonjjin@gmail.com"
] | jeffersonjjin@gmail.com |
bf5bf6f46c12b9cda5cfa83050001a0e72113069 | 79e45fa0e495be5aa967b21467771a27970df99b | /178.py | c1be11f9ea4f1241ffac13ed1324ae98de88788d | [] | no_license | kisa77/Crawl | 7d3b6d7077e60a47f5336b2976b226646cfe2cdb | ec25b563007481b169165f06ca04560d64a1ea74 | refs/heads/master | 2016-09-02T11:19:14.152200 | 2013-07-16T10:12:09 | 2013-07-16T10:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,896 | py | #/usr/bin/python
#coding=utf8
import urllib2,urlparse
import os,sys,bs4,chardet,MySQLdb
import re
from datetime import datetime
from HTMLParser import HTMLParser
class Crawl:
""" Class Crawl crawl data from db.178.com """
__data = ''
__connect = ''
__retryMax = 3
def __init__(self):
self.connect(c_host='localhost', c_user='root', c_passwd='root12')
def request_url(self,url):
try:
request = urllib2.Request(url)
request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) \ AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 ')
return urllib2.urlopen(request)
except CrawlError as e:
return
def write_file(self,file_name, content_list):
file = open(file_name, 'w')
for item in content_list:
file.write(item.prettify())
file.close()
def parse_web_page(self, cont, from_encoding='utf-8'):
return bs4.BeautifulSoup(cont, from_encoding='utf-8')
def connect(self, c_host, c_user, c_passwd):
if not self.__connect:
self.__connect = MySQLdb.connect(host=c_host, user=c_user, passwd=c_passwd)
return self.__connect
else:
return self.__connect
def output_log(self, msg):
print "[" + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "]\t" + msg
def save_to_db(self, data):
self.output_log("---\tsave to db...")
self.__connect.select_db('weixin')
cursor = self.__connect.cursor()
cursor.execute("select id from wow_items where id = %s", data['id'])
tmpResult = cursor.fetchall()
if tmpResult:
self.output_log("item " + data['id'] + " already exists! skip...")
return
insertData = [data['id'], data['name'], 0, 0, data['position'], data['attribute'], '']
insertData += [data['quality'], data['qnumber'], data['img'], data['html']]
cursor.execute("insert into wow_items values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", insertData)
self.__connect.commit()
del(cursor)
self.output_log("---\tsave to db success!")
return
def crawl_item(self, url):
self.__data = {}
for i in range(1, self.__retryMax):
self.output_log("crawling " + url + " ... retry:" + str(i))
tmpCont = self.request_url(url)
if not tmpCont :
continue
if tmpCont.readline() == 'no data':
self.output_log("---\t no data")
return
tmpSoup = self.parse_web_page(tmpCont.read())
bbCode = tmpSoup.find(id='bbcode_content')
try :
self.__data['img'] = re.compile(r'\[img\](.*)\[\/img\]').findall(bbCode.prettify())[0]
except:
self.__data['img'] = ''
try :
self.__data['quality'] = re.compile(r'(\d)').findall(tmpSoup.find(id='item_detail').find('h2')['class'][0])[0]
except:
self.__data['quality'] = ''
try :
self.__data['name'] = tmpSoup.find(id='item_detail').find('strong').text
except:
self.__data['name'] = ''
try :
self.__data['id'] = re.compile(r'ID:([0-9]*)').findall(tmpSoup.find(id='item_detail').find('span').text)[0]
except:
self.__data['id'] = ''
try :
self.__data['qnumber'] = tmpSoup.find(id='item_detail').find(id='ilv').text
except:
self.__data['qnumber'] = ''
try :
self.__data['position'] = tmpSoup.find(id='item_detail').find('table').find('table').find('th').text
except:
self.__data['position'] = ''
try :
self.__data['html'] = tmpSoup.find(id='main').find_all('div')[1].prettify()
except:
self.__data['html'] = ''
try :
""" strip html tag """
parser = HTMLParser()
tmpList = []
parser.handle_data = tmpList.append
parser.feed(tmpSoup.find(id='item_detail').find(id='_dps').prettify().strip("\n"))
parser.close()
self.__data['attribute'] = ''.join(tmpList)
except:
self.__data['attribute'] = ''
""" del temporary variables"""
del(parser,tmpList,tmpSoup,bbCode,tmpCont)
if not self.__data:
continue
return self.save_to_db(self.__data)
# Walk the item id space and crawl each page; any unexpected error is
# logged and the loop continues with the next id.
crawl = Crawl()
for num in range(1495, 100000):
    try :
        request_url = 'http://db.178.com/wow/cn/item/' + str(num) + '.html'
        crawl.crawl_item(url=request_url)
    except :
        # NOTE(review): output_log() returns None, so this also prints "None"
        # after the log line.
        print crawl.output_log('Exception! skip..')
| [
"lixiao@comsenz.com"
] | lixiao@comsenz.com |
dcd252960a1b08665c15f551c29bd9f9aacd0218 | 1abd12b1de8c92b5d6fa251544892d7f55c45ab1 | /MA-ConceptMining/inout/inputoutput.py | f0b680c5d5d92aadea071a5aaf8dc7cc3a01eedc | [
"MIT"
] | permissive | johannabi/MA-ConceptMining | fddd0511b71f664605e10ce33dad29055ba1239e | 5c257c234ee86ef10f3b358f623f342aa829df54 | refs/heads/master | 2023-07-15T15:22:02.003888 | 2021-08-30T09:58:33 | 2021-08-30T09:58:33 | 246,010,367 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,476 | py | import sqlite3 as sql
import re
import csv
def read_file_by_line(file):
    """Read a UTF-8 text file and return a list of its stripped lines.

    :param file: path to the file
    :return: one list entry per line, surrounding whitespace removed
    """
    with open(file, mode='r', encoding='utf-8') as handle:
        return [raw.strip() for raw in handle]
def read_esco_csv(file, only_unigrams, synsets):
    """Read ESCO skills from a csv export.

    :param file: path to the csv file (column 4 = preferred label,
        column 5 = newline-separated alternative labels)
    :param only_unigrams: keep only single-word skill labels
    :param synsets: group preferred + alternative labels into synsets
    :return: list of synsets (sets with more than one member) when
        `synsets` is True, otherwise a flat list of skill strings
    """
    collected = []
    with open(file, newline='', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
        next(reader, None)  # skip the header row
        for row in reader:
            labels = [row[4]]
            if len(row[5]) > 0:
                labels.extend(row[5].split('\n'))
            if only_unigrams:
                labels = [label for label in labels if ' ' not in label]
            synset = set(labels)
            if synsets:
                # only keep synsets with more than one member
                if len(synset) > 1:
                    collected.append(synset)
            else:
                collected.extend(synset)
    return collected
def read_ams_synsets(file, only_unigrams, synsets):
    """Read AMS skill categories from a sqlite database.

    :param file: path to the sqlite file (table Categories with columns
        Synonyms — ' | '-separated — and Orig_String)
    :param only_unigrams: keep only single-word expressions
    :param synsets: group each category together with its synonyms
    :return: set of synonym tuples when `synsets` is True, otherwise a
        flat list of lower-cased category strings
    """
    conn = sql.connect(file)
    cursor = conn.cursor()
    cursor.execute("""SELECT Synonyms, Orig_String FROM Categories""")
    rows = cursor.fetchall()
    if synsets:
        grouped = set()
        for synonyms, category in rows:
            if synonyms is None:
                continue
            # keep only synonyms that are single-word expressions
            members = {s.lower() for s in synonyms.split(' | ') if ' ' not in s}
            if not only_unigrams or ' ' not in category:
                members.add(category.lower())
            if len(members) > 1:
                grouped.add(tuple(members))
        cursor.close()
        return grouped
    skills = []
    for _, category in rows:
        if not only_unigrams or ' ' not in category:
            skills.append(category.lower())
    cursor.close()
    return skills
def read_jobads_content(file):
    """Read all job-ad texts from the given sqlite file.

    :param file: path to the sqlite database (table jobs_textkernel)
    :return: list of job-ad description strings
    """
    conn = sql.connect(file)
    cursor = conn.cursor()
    cursor.execute("""SELECT STELLENBESCHREIBUNG FROM jobs_textkernel""")
    return [row[0] for row in cursor.fetchall()]
| [
"jbinnewitt@gmail.com"
] | jbinnewitt@gmail.com |
1cd644fe4370089fe5cf86ae2fc2d3fa316e8e2e | e629d61db2f08f66cf46d934ab0f87fa1666de05 | /backend/lively_heart_25130/urls.py | 5c32c3d6b9e17dce8e7eb899ed0a90b4b5455ae7 | [] | no_license | crowdbotics-apps/lively-heart-25130 | ec80559da8d6b168df1ce75415c5d6b916c97ee1 | ed33785297cbb8f794034de1bc3c7fb81bdbe048 | refs/heads/master | 2023-03-24T16:57:41.146127 | 2021-03-19T21:41:18 | 2021-03-19T21:41:18 | 349,561,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | """lively_heart_25130 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Core URL routes; the SPA catch-all patterns are deliberately appended last.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Lively Heart"
admin.site.site_title = "Lively Heart Admin Portal"
admin.site.index_title = "Lively Heart Admin"

# swagger
# drf-yasg schema view; the generated API docs require authentication.
api_info = openapi.Info(
    title="Lively Heart API",
    default_version="v1",
    description="API documentation for Lively Heart App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Serve the single-page app's index.html for the root and any unmatched path.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ef679fa89caf7d38e7aa2766c74680ff885e8be4 | ae9bb7babce2a0349ae932985cf418a03057c670 | /ProgramAndDataStructure/list/__init__.py | 50e5397d5291650f1e1f4a4e99a244b430ba0f89 | [] | no_license | Veraun/HogwartsSDET17-1 | d2592fcb4c9c63724c19bcf9edde349ebcd2c8af | 6648dbfb640b065ff2c76cb6889a8f9e4f124b91 | refs/heads/main | 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | '''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/5/20 19:54
@Email: Warron.Wang
''' | [
"wei1.wang@ximalaya.com"
] | wei1.wang@ximalaya.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.