row_id
int64 0
48.4k
| init_message
stringlengths 1
342k
| conversation_hash
stringlengths 32
32
| scores
dict |
|---|---|---|---|
41,060
|
HI
|
2899080588fb18af6869ca6b8a7a16bb
|
{
"intermediate": 0.32988452911376953,
"beginner": 0.2611807882785797,
"expert": 0.40893468260765076
}
|
41,061
|
how get mirror reflection of direction (vector2)
|
1ca307584ef9f64b6bf4bfd262e741f4
|
{
"intermediate": 0.2579507529735565,
"beginner": 0.24389562010765076,
"expert": 0.4981536865234375
}
|
41,062
|
defold lua script that reflect some vector3 direction
|
e803be14b476eeccbaace6c4d94af7d7
|
{
"intermediate": 0.34662002325057983,
"beginner": 0.41527292132377625,
"expert": 0.23810705542564392
}
|
41,063
|
import acrcloud
import os
import eyed3
import requests
import json
from acrcloud.recognizer import ACRCloudRecognizer
# ACR Cloud setup
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
dir(acrcloud)
# Initialize the ACRCloud recognizer
recognizer = ACRCloudRecognizer(config)
# Function to recognize the song from an audio file
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result) # Parse the JSON string into a dictionary
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
parameters to include to tagging a audio file are:
Title, contributing artists, artists, album, year of released, track index number in album, genre, copyright label, composer’s name, published by Karthik.
get these by musixmatch
musixmatch_api.py:
import base64
import hmac
from datetime import datetime
from os import urandom
from urllib import parse
from uuid import uuid4
from utils.utils import create_requests_session
class CaptchaError(Exception):
def __init__(self, message):
super(CaptchaError, self).__init__(message)
class UserTokenError(Exception):
def __init__(self, message):
super(UserTokenError, self).__init__(message)
class Musixmatch:
def __init__(self, exception):
self.API_URL = 'https://apic-desktop.musixmatch.com/ws/1.1/'
self.s = create_requests_session()
self.exception = exception
self.headers = {
'Connection': 'Keep-Alive',
'User-Agent': 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Musixmatch/0.19.4 Chrome/58.0.3029.110 Electron/1.7.6 Safari/537.36 '
}
self.user_token = None
def sign_request(self, method, params, timestamp):
to_hash = self.API_URL + method + '?' + parse.urlencode(params)
# Thanks to https://github.com/aaronlpv/live-lyrics/blob/master/musixmatch.c for the desktop app hmac key
key = ("IEJ5E8XFaH" "QvIQNfs7IC").encode()
signature = hmac.digest(key, (to_hash + timestamp).encode(), digest='SHA1')
return base64.urlsafe_b64encode(signature).decode()
def get_user_token_old(self):
currenttime = datetime.now()
timestamp = currenttime.strftime('%Y-%m-%dT%H:%M:%SZ')
signature_timestamp = currenttime.strftime('%Y%m%d')
method = 'token.get'
params = {
'format': 'json',
'guid': str(uuid4()),
'timestamp': timestamp,
'build_number': '2017091202',
'lang': 'en-GB',
'app_id': 'web-desktop-app-v1.0'
}
params['signature'] = self.sign_request(method, params, signature_timestamp)
params['signature_protocol'] = 'sha1'
r = self.s.get(self.API_URL + method, params=params, headers=self.headers, cookies={'AWSELB': 'unknown'})
if r.status_code != 200:
raise Exception(r.text)
self.user_token = r.json()['message']['body']['user_token']
if self.user_token == 'UpgradeOnlyUpgradeOnlyUpgradeOnlyUpgradeOnly':
raise Exception('Musixmatch: getting token failed')
return self.user_token
def get_user_token(self):
r = self.s.get(f'{self.API_URL}token.get', headers=self.headers, params={
'user_language': 'en', 'app_id': 'web-desktop-app-v1.0'
}, cookies={
'AWSELB': '0', 'AWSELBCORS': '0'
})
r = r.json()
if r['message']['header']['status_code'] == 401 and r['message']['header']['hint'] == 'captcha':
raise CaptchaError('Captcha required')
elif r['message']['header']['status_code'] != 200:
raise self.exception(f"Error: {r['message']['header']['hint']}")
self.user_token = r['message']['body']['user_token']
if self.user_token == 'UpgradeOnlyUpgradeOnlyUpgradeOnlyUpgradeOnly':
raise UserTokenError('Getting user token failed')
return self.user_token
def _get(self, url: str, query: dict):
params = {
'usertoken': self.user_token,
'app_id': 'web-desktop-app-v1.0',
}
params.update(query)
r = self.s.get(f'{self.API_URL}{url}', params=params, headers=self.headers, cookies={
'AWSELB': '0', 'AWSELBCORS': '0'
})
print("Response Status Code:", r.status_code) # Debugging line
if r.status_code not in [200, 201, 202]:
print("Response Text (Error):", r.text) # Debugging line
raise self.exception(r.text)
print("API Response:", json.dumps(response_content, indent=2)) # Debugging line
r = r.json()
if r['message']['header']['status_code'] == 401 and r['message']['header']['hint'] == 'captcha':
# throw a captcha error
raise CaptchaError('Captcha required')
elif r['message']['header']['status_code'] != 200:
return None
return r['message']['body']
def get_search_by_track(self, track_name: str, artist_name: str, album_name: str):
# needed for richsync?
r = self._get('matcher.track.get', {
'q_track': track_name,
'q_artist': artist_name,
'q_album': album_name,
})
print("Track Data:", r)
return r['track'] if r else None
def get_track_by_isrc(self, isrc: str):
r = self._get('track.get', {'track_isrc': isrc})
return r['track'] if r else None
def get_lyrics_by_id(self, track_id: str):
r = self._get('track.lyrics.get', {'track_id': track_id})
return r['lyrics'] if r else None
def get_subtitle_by_id(self, common_track_id: str):
r = self._get('track.subtitle.get', {'commontrack_id': common_track_id})
return r['subtitle'] if r else None
def get_rich_sync_by_id(self, track_id: str):
# requires track_id and not common_track_id
r = self._get('track.richsync.get', {'track_id': track_id})
return r['richsync'] if r else None
# Inside musixmatch_api.py, in the Musixmatch class
def get_album_cover_url(self, album_id):
album_info = self._get(f'album.get', {'album_id': album_id})
if album_info is None:
raise Exception(f"Could not get album info for album_id {album_id}")
return album_info['album']['album_coverart_100x100']
def get_lyrics_by_metadata(self, track_name: str, artist_name: str, album_name: str):
return self._get('macro.subtitles.get', {
'q_artist': artist_name,
'q_track': track_name,
'q_album': album_name,
'format': 'json',
'namespace': 'lyrics_richsynched',
'optional_calls': 'track.richsync'
})['macro_calls']
|
ac07b58ce82260e6827df4b967655126
|
{
"intermediate": 0.29797378182411194,
"beginner": 0.5434902906417847,
"expert": 0.1585359126329422
}
|
41,064
|
import json
import requests
from acrcloud.recognizer import ACRCloudRecognizer
from musixmatch_api import Musixmatch, CaptchaError, UserTokenError
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, error
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud configuration (update with your credentials)
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
recognizer = ACRCloudRecognizer(config)
# Initialize Musixmatch API (exception handling to be added as per your implementation)
musixmatch = Musixmatch(Exception)
# Function to recognize a song using ACRCloud
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result)
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
def format_time(ts):
'''Converts time in seconds to the format [mm:ss.xx]'''
minutes = int(ts // 60)
seconds = int(ts % 60)
hundredths = int((ts - int(ts)) * 100)
return f'[{minutes:02d}:{seconds:02d}.{hundredths:02d}]'
def process_rich_sync_lyrics(rich_sync_lyrics_json):
'''Converts Musixmatch rich sync data to LRC format'''
lrc_lines = []
try:
# Load the JSON string into a Python object
rich_sync_data = json.loads(rich_sync_lyrics_json)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
return None
# Iterate through each line and create formatted LRC lines
for line in rich_sync_data:
ts = format_time(line['ts']) # Start time of the line
lrc_line = f'{ts}{line["x"]}' # Use "x" for the entire line of lyrics
lrc_lines.append(lrc_line)
# Join the formatted lines with line breaks
return '\n'.join(lrc_lines)
# Function to get lyrics from Musixmatch given artist name and song title
def get_lyrics_from_musicxmatch(artist_name, song_title):
try:
user_token = musixmatch.get_user_token()
track_data = musixmatch.get_search_by_track(song_title, artist_name, "")
if track_data:
track_id = track_data['track_id']
rich_sync_data = musixmatch.get_rich_sync_by_id(track_id)
# Print the JSON response for debugging
print(json.dumps(track_data, indent=2))
print(json.dumps(rich_sync_data, indent=2))
if rich_sync_data and 'richsync_body' in rich_sync_data:
rich_sync_lyrics_json = rich_sync_data['richsync_body']
lrc_lyrics = process_rich_sync_lyrics(rich_sync_lyrics_json)
return lrc_lyrics
else:
print("No synced lyrics found.")
return None
else:
print("Track not found in Musixmatch.")
return None
except (CaptchaError, UserTokenError) as e:
print(f"Error while working with Musixmatch: {e}")
return None
def download_album_cover(album_cover_url, save_path, size=(1400, 1400)):
# Download the album cover image from the given URL
response = requests.get(album_cover_url, stream=True)
if response.status_code == 200:
with open(save_path, 'wb') as file:
for chunk in response:
file.write(chunk)
# Resize and save the image externally if size is specified
if size:
from PIL import Image
im = Image.open(save_path)
im = im.resize(size)
im.save(save_path)
def download_small_cover(album_cover_url, save_path, size=(350, 350)):
# If you want to save a smaller version separately
download_album_cover(album_cover_url, save_path, size)
# Note: You should check the appropriate usage rights before downloading images from the internet
def embed_album_art(audio_file_path, album_cover_path):
audio = MP3(audio_file_path, ID3=ID3)
# Add ID3 tag if it doesn't exist
try:
audio.add_tags()
except error as e:
pass
with open(album_cover_path, 'rb') as album_art:
audio.tags.add(
APIC(
encoding=3, # 3 is for utf-8
mime='image/jpeg', # image/jpeg or image/png
type=3, # 3 is for the cover image
desc=u'Cover',
data=album_art.read()
)
)
audio.save(v2_version=3)
# Note: This function assumes you have already downloaded the album cover at 'album_cover_path'
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3' # Replace with actual path
lrc_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.lrc' # Output LRC file path
# Recognize the song using ACRCloud
song_tags = recognize_song(audio_file_path)
if song_tags:
artist_name = song_tags['artists'][0]['name']
song_title = song_tags['title']
print(f"Identified Song: {artist_name} - {song_title}")
# Fetch track data using the recognized song's title and artist name
track_data = musixmatch.get_search_by_track(song_title, artist_name)
if track_data:
track_id = track_data['track_id']
# Fetch the album cover URL using the track ID
album_cover_url = musixmatch.get_album_cover_url_by_track_id(track_id)
if album_cover_url:
album_cover_save_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Album_Cover.jpg'
download_album_cover(album_cover_url, album_cover_save_path) # Download the high-resolution cover
print(f"Album cover saved to: {album_cover_save_path}")
small_album_cover_save_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Small_Album_Cover.jpg'
download_small_cover(album_cover_url, small_album_cover_save_path) # For embedding
print(f"Small album cover saved to: {small_album_cover_save_path}")
# Embed 350x350 album art into the MP3 file
embed_album_art(audio_file_path, small_album_cover_save_path)
print("Album art embedded into the MP3 file.")
else:
print("Could not get album cover.")
# Fetch the synced lyrics using the recognized song's track ID
rich_sync_data = musixmatch.get_rich_sync_by_id(track_id)
if rich_sync_data and 'richsync_body' in rich_sync_data:
rich_sync_lyrics_json = rich_sync_data['richsync_body']
lrc_lyrics = process_rich_sync_lyrics(rich_sync_lyrics_json)
if lrc_lyrics:
# Write the LRC lyrics to a file
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("No synced lyrics found.")
else:
print("Track not found in Musixmatch.")
else:
print("Could not identify the song.")
Identified Song: Nick Jonas - This Is Heaven
Traceback (most recent call last):
File "C:\Users\ILEG-i5-11\Downloads\Compressed\Duplicate\checkfirst.py", line 146, in <module>
track_data = musixmatch.get_search_by_track(song_title, artist_name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: Musixmatch.get_search_by_track() missing 1 required positional argument: 'album_name'
|
e74d3945db3e3ce63b207ed225265c68
|
{
"intermediate": 0.42128786444664,
"beginner": 0.4110065996646881,
"expert": 0.16770553588867188
}
|
41,065
|
import acrcloud
import os
import eyed3
import requests
import json
from acrcloud.recognizer import ACRCloudRecognizer
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud setup
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
dir(acrcloud)
# Initialize the ACRCloud recognizer
recognizer = ACRCloudRecognizer(config)
# Function to recognize the song from an audio file
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result) # Parse the JSON string into a dictionary
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
# Function to set ID3 tags
def set_id3_tags_mp3(audio_file_path, tags):
audio_file = eyed3.load(audio_file_path)
if not audio_file.tag:
audio_file.initTag()
audio_file.tag.artist = tags.get('artists')[0].get('name')
audio_file.tag.album = tags.get('album').get('name')
audio_file.tag.album_artist = tags.get('artists')[0].get('name')
audio_file.tag.title = tags.get('title')
# Set the release year (if available)
release_date = tags.get('release_date')
if release_date and len(release_date) >= 4: # Check if release_date contains at least the year
year_string = release_date[:4]
try:
year = int(year_string)
# Some versions of eyeD3 require a Date object if available
if hasattr(eyed3.id3.tag, 'Date'):
audio_file.tag.recording_date = eyed3.id3.tag.Date(year)
else:
# Otherwise, set it as text_frame
audio_file.tag.setTextFrame("TDRC", year_string)
except ValueError:
print(f"Invalid date format in the tag: {release_date}")
# Add more tags here
audio_file.tag.genre = tags.get('genres')[0].get('name') # Assuming there's at least one genre
audio_file.tag.publisher = "Karthik" # Publisher tag set as 'karthik'
# To save the copyright label:
audio_file.tag.copyright = tags.get('label', '')
audio_file.tag.comments.set(u"Explicit: Yes")
audio_file.tag.save(version=eyed3.id3.ID3_V2_3)
audio_file.tag.save()
# Replace 'path_to_your_audio_file.mp3' with the actual file path of the unknown song
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3'
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
# Renaming the file after identifying the song and setting tags
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
if artist_name and song_title:
new_file_name = f"{artist_name} - {song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song.')
I have lyricsprovider.py
how to include in this code and call that after execution of above code
|
026e2eb3960a14779ee325fbbc49b47d
|
{
"intermediate": 0.37207919359207153,
"beginner": 0.4024667739868164,
"expert": 0.22545406222343445
}
|
41,066
|
from ipaddress import *
net = ip_network('136.36.240.16/255.255.255.248',0)
c = 0
for ip in net:
b = bin(ip)[2:]
if b.count('101')==0:
c+=1
print(c)
почему выдает ошибку? TypeError: 'IPv4Address' object cannot be interpreted as an integer
|
2d7cad70691f2292e6119f16caadae31
|
{
"intermediate": 0.23145988583564758,
"beginner": 0.5680987238883972,
"expert": 0.20044130086898804
}
|
41,067
|
my code:
import acrcloud
import os
import eyed3
import requests
import json
from acrcloud.recognizer import ACRCloudRecognizer
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud setup
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
dir(acrcloud)
# Initialize the ACRCloud recognizer
recognizer = ACRCloudRecognizer(config)
# Function to recognize the song from an audio file
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result) # Parse the JSON string into a dictionary
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
# Function to set ID3 tags
def set_id3_tags_mp3(audio_file_path, tags):
audio_file = eyed3.load(audio_file_path)
if not audio_file.tag:
audio_file.initTag()
audio_file.tag.artist = tags.get('artists')[0].get('name')
audio_file.tag.album = tags.get('album').get('name')
audio_file.tag.album_artist = tags.get('artists')[0].get('name')
audio_file.tag.title = tags.get('title')
# Set the release year (if available)
release_date = tags.get('release_date')
if release_date and len(release_date) >= 4: # Check if release_date contains at least the year
year_string = release_date[:4]
try:
year = int(year_string)
# Some versions of eyeD3 require a Date object if available
if hasattr(eyed3.id3.tag, 'Date'):
audio_file.tag.recording_date = eyed3.id3.tag.Date(year)
else:
# Otherwise, set it as text_frame
audio_file.tag.setTextFrame("TDRC", year_string)
except ValueError:
print(f"Invalid date format in the tag: {release_date}")
# Add more tags here
audio_file.tag.genre = tags.get('genres')[0].get('name') # Assuming there's at least one genre
audio_file.tag.publisher = "Karthik" # Publisher tag set as 'karthik'
# To save the copyright label:
audio_file.tag.copyright = tags.get('label', '')
audio_file.tag.comments.set(u"Explicit: Yes")
audio_file.tag.save(version=eyed3.id3.ID3_V2_3)
audio_file.tag.save()
# Replace 'path_to_your_audio_file.mp3' with the actual file path of the unknown song
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3'
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
# Renaming the file after identifying the song and setting tags
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
if artist_name and song_title:
new_file_name = f"{artist_name} - {song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song.')
after renaming the song file, call the lyricprovi.py
import json
import requests
from acrcloud.recognizer import ACRCloudRecognizer
from musixmatch_api import Musixmatch, CaptchaError, UserTokenError
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud configuration (update with your credentials)
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
recognizer = ACRCloudRecognizer(config)
# Initialize Musixmatch API (exception handling to be added as per your implementation)
musixmatch = Musixmatch(Exception)
# Function to recognize a song using ACRCloud
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result)
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
def format_time(ts):
'''Converts time in seconds to the format [mm:ss.xx]'''
minutes = int(ts // 60)
seconds = int(ts % 60)
hundredths = int((ts - int(ts)) * 100)
return f'[{minutes:02d}:{seconds:02d}.{hundredths:02d}]'
def process_rich_sync_lyrics(rich_sync_lyrics_json):
'''Converts Musixmatch rich sync data to LRC format'''
lrc_lines = []
try:
# Load the JSON string into a Python object
rich_sync_data = json.loads(rich_sync_lyrics_json)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
return None
# Iterate through each line and create formatted LRC lines
for line in rich_sync_data:
ts = format_time(line['ts']) # Start time of the line
lrc_line = f'{ts}{line["x"]}' # Use "x" for the entire line of lyrics
lrc_lines.append(lrc_line)
# Join the formatted lines with line breaks
return '\n'.join(lrc_lines)
# Function to get lyrics from Musixmatch given artist name and song title
def get_lyrics_from_musicxmatch(artist_name, song_title):
try:
user_token = musixmatch.get_user_token()
track_data = musixmatch.get_search_by_track(song_title, artist_name, "")
if track_data:
track_id = track_data['track_id']
rich_sync_data = musixmatch.get_rich_sync_by_id(track_id)
# Print the JSON response for debugging
print(json.dumps(track_data, indent=2))
print(json.dumps(rich_sync_data, indent=2))
if rich_sync_data and 'richsync_body' in rich_sync_data:
rich_sync_lyrics_json = rich_sync_data['richsync_body']
lrc_lyrics = process_rich_sync_lyrics(rich_sync_lyrics_json)
return lrc_lyrics
else:
print("No synced lyrics found.")
return None
else:
print("Track not found in Musixmatch.")
return None
except (CaptchaError, UserTokenError) as e:
print(f"Error while working with Musixmatch: {e}")
return None
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3' # Replace this path
lrc_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.lrc' # Output LRC file path
# Recognize the song using ACRCloud
song_tags = recognize_song(audio_file_path)
if song_tags:
artist_name = song_tags['artists'][0]['name']
song_title = song_tags['title']
print(f"Identified Song: {artist_name} - {song_title}")
# Fetch the synced lyrics (rich sync) using the recognized song information
lrc_lyrics = get_lyrics_from_musicxmatch(artist_name, song_title)
if lrc_lyrics:
# Write the LRC lyrics to a file
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
else:
print("Could not identify the song.")
after renaming the file the file name changes so set automatically file name mp3 and lyrics also must output same name as the audio file
|
f7a9095c3c2a374f3f042b95de4513d2
|
{
"intermediate": 0.44590988755226135,
"beginner": 0.30237555503845215,
"expert": 0.2517145574092865
}
|
41,068
|
mycode.py:
import acrcloud
import os
import eyed3
import requests
import json
from acrcloud.recognizer import ACRCloudRecognizer
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud setup
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
dir(acrcloud)
# Initialize the ACRCloud recognizer
recognizer = ACRCloudRecognizer(config)
# Function to recognize the song from an audio file
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result) # Parse the JSON string into a dictionary
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
# Function to set ID3 tags
def set_id3_tags_mp3(audio_file_path, tags):
audio_file = eyed3.load(audio_file_path)
if not audio_file.tag:
audio_file.initTag()
audio_file.tag.artist = tags.get('artists')[0].get('name')
audio_file.tag.album = tags.get('album').get('name')
audio_file.tag.album_artist = tags.get('artists')[0].get('name')
audio_file.tag.title = tags.get('title')
# Set the release year (if available)
release_date = tags.get('release_date')
if release_date and len(release_date) >= 4: # Check if release_date contains at least the year
year_string = release_date[:4]
try:
year = int(year_string)
# Some versions of eyeD3 require a Date object if available
if hasattr(eyed3.id3.tag, 'Date'):
audio_file.tag.recording_date = eyed3.id3.tag.Date(year)
else:
# Otherwise, set it as text_frame
audio_file.tag.setTextFrame("TDRC", year_string)
except ValueError:
print(f"Invalid date format in the tag: {release_date}")
# Add more tags here
audio_file.tag.genre = tags.get('genres')[0].get('name') # Assuming there's at least one genre
audio_file.tag.publisher = "Karthik" # Publisher tag set as 'karthik'
# To save the copyright label:
audio_file.tag.copyright = tags.get('label', '')
audio_file.tag.comments.set(u"Explicit: Yes")
audio_file.tag.save(version=eyed3.id3.ID3_V2_3)
audio_file.tag.save()
# Replace 'path_to_your_audio_file.mp3' with the actual file path of the unknown song
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3'
song_tags = recognize_song(audio_file_path)
if song_tags:
print(f'Song identified: {song_tags}')
set_id3_tags_mp3(audio_file_path, song_tags)
# Renaming the file after identifying the song and setting tags
artist_name = song_tags.get('artists')[0].get('name')
song_title = song_tags.get('title')
if artist_name and song_title:
new_file_name = f"{artist_name} - {song_title}.mp3"
new_file_path = os.path.join(os.path.dirname(audio_file_path), new_file_name)
os.rename(audio_file_path, new_file_path)
print(f"File has been renamed to: {new_file_name}")
else:
print('Could not identify the song.')
it renames my song unknown_file to artist name - track name
in my lyricsprovider.py: remove that unknown_file path to artist name - track name
import json
import requests
from acrcloud.recognizer import ACRCloudRecognizer
from musixmatch_api import Musixmatch, CaptchaError, UserTokenError
# ACRCloud API credentials
ACR_HOST = "identify-ap-southeast-1.acrcloud.com"
ACR_ACCESS_KEY = "fe9d03703ee501887c5570fff859bee9"
ACR_ACCESS_SECRET = "PFbmdVo4ZjRkT7AI3l1NLGGtGtsgIbC9vs1ydgYb"
# ACR Cloud configuration (update with your credentials)
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
recognizer = ACRCloudRecognizer(config)
# Initialize Musixmatch API (exception handling to be added as per your implementation)
musixmatch = Musixmatch(Exception)
# Function to recognize a song using ACRCloud
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result)
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
def format_time(ts):
'''Converts time in seconds to the format [mm:ss.xx]'''
minutes = int(ts // 60)
seconds = int(ts % 60)
hundredths = int((ts - int(ts)) * 100)
return f'[{minutes:02d}:{seconds:02d}.{hundredths:02d}]'
def process_rich_sync_lyrics(rich_sync_lyrics_json):
'''Converts Musixmatch rich sync data to LRC format'''
lrc_lines = []
try:
# Load the JSON string into a Python object
rich_sync_data = json.loads(rich_sync_lyrics_json)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
return None
# Iterate through each line and create formatted LRC lines
for line in rich_sync_data:
ts = format_time(line['ts']) # Start time of the line
lrc_line = f'{ts}{line["x"]}' # Use "x" for the entire line of lyrics
lrc_lines.append(lrc_line)
# Join the formatted lines with line breaks
return '\n'.join(lrc_lines)
# Function to get lyrics from Musixmatch given artist name and song title
def get_lyrics_from_musicxmatch(artist_name, song_title):
try:
user_token = musixmatch.get_user_token()
track_data = musixmatch.get_search_by_track(song_title, artist_name, "")
if track_data:
track_id = track_data['track_id']
rich_sync_data = musixmatch.get_rich_sync_by_id(track_id)
# Print the JSON response for debugging
print(json.dumps(track_data, indent=2))
print(json.dumps(rich_sync_data, indent=2))
if rich_sync_data and 'richsync_body' in rich_sync_data:
rich_sync_lyrics_json = rich_sync_data['richsync_body']
lrc_lyrics = process_rich_sync_lyrics(rich_sync_lyrics_json)
return lrc_lyrics
else:
print("No synced lyrics found.")
return None
else:
print("Track not found in Musixmatch.")
return None
except (CaptchaError, UserTokenError) as e:
print(f"Error while working with Musixmatch: {e}")
return None
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3' # Replace this path
lrc_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.lrc' # Output LRC file path
# Recognize the song using ACRCloud
song_tags = recognize_song(audio_file_path)
if song_tags:
artist_name = song_tags['artists'][0]['name']
song_title = song_tags['title']
print(f"Identified Song: {artist_name} - {song_title}")
# Fetch the synced lyrics (rich sync) using the recognized song information
lrc_lyrics = get_lyrics_from_musicxmatch(artist_name, song_title)
if lrc_lyrics:
# Write the LRC lyrics to a file
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
else:
print("Could not identify the song.")
|
9cc840fddffb9669e4d12d64a81ead9f
|
{
"intermediate": 0.41855859756469727,
"beginner": 0.344873309135437,
"expert": 0.2365681380033493
}
|
41,069
|
# Task 4: streamgraph.
# Use streamgraph to visualize a movie dataset, showing the number of movie genres over time.
# first get the data from the package ggplot2movies
install.packages('ggplot2movies')
library(ggplot2movies)
dat <- ggplot2movies::movies
# The movies dataset is in the wide format for movie genres.
# notice that streamgraph requires a long-format data.
# Data shape can be wide or long.
# In the wide format, each observation or case is represented by a single row, and each variable is represented by a separate column.
# Wide data is often used when the variables are measured or recorded at a fixed set of time points or categories.
# In the long format, the data is organized in a way that allows for multiple observations or measurements for each combination of variables.
# Long data is often used when the variables are measured or recorded repeatedly over time or in different categories.
# You will perform the following steps to generate the graph:
# a) select columns of year and genre. notice that the data has a wide format
# and you need to convert it into long format first.
# hint: use the function "melt" from reshape2 library to convert the data frame
# b) count the frequency of each genre per year
# c) show the result with a streamgraph, and include a selection menu
|
5e92e0802729845e772d34905a096e4a
|
{
"intermediate": 0.5973877310752869,
"beginner": 0.2043658196926117,
"expert": 0.19824646413326263
}
|
41,070
|
# Task 4: streamgraph.
# Use streamgraph to visualize a movie dataset, showing the number of movie genres over time.
# first get the data from the package ggplot2movies
install.packages(‘ggplot2movies’)
library(ggplot2movies)
dat <- ggplot2movies::movies
# The movies dataset is in the wide format for movie genres.
# notice that streamgraph requires a long-format data.
# Data shape can be wide or long.
# In the wide format, each observation or case is represented by a single row, and each variable is represented by a separate column.
# Wide data is often used when the variables are measured or recorded at a fixed set of time points or categories.
# In the long format, the data is organized in a way that allows for multiple observations or measurements for each combination of variables.
# Long data is often used when the variables are measured or recorded repeatedly over time or in different categories.
# You will perform the following steps to generate the graph:
# a) select columns of year and genre. notice that the data has a wide format
# and you need to convert it into long format first.
# hint: use the function “melt” from reshape2 library to convert the data frame
# b) count the frequency of each genre per year
# c) show the result with a streamgraph, and include a selection menu
|
92e9020a660628e4cc70672ee294c654
|
{
"intermediate": 0.5772339701652527,
"beginner": 0.2177974283695221,
"expert": 0.2049686163663864
}
|
41,071
|
import requests
import pandas as pd
from time import sleep
# مجموعة لتخزين الأحداث المطبوعة بالفعل لتجنب التكرار
printed_events = set()
def fetch_live_players(printed_events):
fixtures_url = 'https://fantasy.premierleague.com/api/fixtures/'
fixtures_response = requests.get(fixtures_url)
players_url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
players_response = requests.get(players_url)
if fixtures_response.status_code == 200 and players_response.status_code == 200:
fixtures_data = fixtures_response.json()
players_data = players_response.json()
live_fixtures = [fixture for fixture in fixtures_data if not fixture['finished'] and fixture['started']]
players_df = pd.DataFrame(players_data['elements'])
teams_df = pd.DataFrame(players_data['teams'])
if live_fixtures:
for fixture in live_fixtures:
event_id = fixture['event']
live_url = f'https://fantasy.premierleague.com/api/event/{event_id}/live/'
live_response = requests.get(live_url)
if live_response.status_code == 200:
live_data = live_response.json()['elements']
for element in live_data:
element_info = players_df.loc[players_df['id'] == element['id']].iloc[0]
team_info = teams_df.loc[teams_df['id'] == element_info['team']].iloc[0]
player_name = element_info['web_name']
stats = element['stats']
# تحقق من الأهداف
if stats['goals_scored'] > 0:
event_key = f"{player_name}-goal-{stats['goals_scored']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Goal⚽: {player_name}, {team_info['name']}- P: {stats['goals_scored']*6}, Tot: {stats['total_points']}")
# تحقق من الأسيست
if stats['assists'] > 0:
event_key = f"{player_name}-assist-{stats['assists']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Assist🅰️: {player_name}, {team_info['name']}- P: {stats['assists']*3}, Tot: {stats['total_points']}")
# تحقق من البطاقات الصفراء
if stats['yellow_cards'] > 0:
event_key = f"{player_name}-yellow-{stats['yellow_cards']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Yellow Card🟨: #{player_name}, #{team_info['name']}- P: -{stats['yellow_cards']*1}, Tot: {stats['total_points']}")
# تحقق من البطاقات الحمراء
if stats['red_cards'] > 0:
event_key = f"{player_name}-red-{stats['red_cards']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Red Card: {player_name}, {team_info['name']}, Event Points: -{stats['red_cards']*3}, Total Points: {stats['total_points']}")
else:
print('Failed to retrieve data.')
# نقطة بدء البرنامج
def main():
while True:
try:
fetch_live_players(printed_events)
except Exception as e:
print(e) # لدينا الآن الخطأ المطبوع في حال حدوث أي استثناء
sleep(60) # فترة الانتظار بين كل تحقق هي 60 ثانية
if __name__ == "__main__":
main()
|
ef2f3ec2d15f3a9609d815721e665d3b
|
{
"intermediate": 0.40353623032569885,
"beginner": 0.3928612172603607,
"expert": 0.20360258221626282
}
|
41,072
|
Hello
|
43ce7185b337ee64edd1030e0d2066c7
|
{
"intermediate": 0.3123404085636139,
"beginner": 0.2729349136352539,
"expert": 0.4147246778011322
}
|
41,073
|
Develop a signup and login page design that incorporates a unique navigation system, such as a hamburger menu or a slide-out panel.
Use HTML, CSS, and JavaScript to create a signup and login page with a unique navigation system, such as a hamburger menu or a slide-out panel. Incorporate animations and transitions to enhance the user experience, such as a smooth slide-out effect for the navigation panel or a subtle fade-in effect for the login form. Ensure the design is responsive and works well on both desktop and mobile devices.
|
f0f663e634ab1cadf2c613a7df25a757
|
{
"intermediate": 0.34028542041778564,
"beginner": 0.4275043308734894,
"expert": 0.23221023380756378
}
|
41,074
|
in lyricprovdei.py:
import json
import requests
from acrcloud.recognizer import ACRCloudRecognizer
from musixmatch_api import Musixmatch, CaptchaError, UserTokenError
# ACR Cloud configuration (update with your credentials)
config = {
'host': ACR_HOST,
'access_key': ACR_ACCESS_KEY,
'access_secret': ACR_ACCESS_SECRET,
'timeout': 10 # seconds
}
recognizer = ACRCloudRecognizer(config)
# Initialize Musixmatch API (exception handling to be added as per your implementation)
musixmatch = Musixmatch(Exception)
# Function to recognize a song using ACRCloud
def recognize_song(audio_file_path):
buffer = open(audio_file_path, 'rb').read()
result = recognizer.recognize_by_filebuffer(buffer, 0)
try:
result_dict = json.loads(result)
return result_dict['metadata']['music'][0]
except (KeyError, IndexError, json.JSONDecodeError) as e:
print(f"Error while parsing result: {e}")
return None
def format_time(ts):
'''Converts time in seconds to the format [mm:ss.xx]'''
minutes = int(ts // 60)
seconds = int(ts % 60)
hundredths = int((ts - int(ts)) * 100)
return f'[{minutes:02d}:{seconds:02d}.{hundredths:02d}]'
def process_rich_sync_lyrics(rich_sync_lyrics_json):
'''Converts Musixmatch rich sync data to LRC format'''
lrc_lines = []
try:
# Load the JSON string into a Python object
rich_sync_data = json.loads(rich_sync_lyrics_json)
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
return None
# Iterate through each line and create formatted LRC lines
for line in rich_sync_data:
ts = format_time(line['ts']) # Start time of the line
lrc_line = f'{ts}{line["x"]}' # Use "x" for the entire line of lyrics
lrc_lines.append(lrc_line)
# Join the formatted lines with line breaks
return '\n'.join(lrc_lines)
# Function to get lyrics from Musixmatch given artist name and song title
def get_lyrics_from_musicxmatch(artist_name, song_title):
try:
user_token = musixmatch.get_user_token()
track_data = musixmatch.get_search_by_track(song_title, artist_name, "")
if track_data:
track_id = track_data['track_id']
rich_sync_data = musixmatch.get_rich_sync_by_id(track_id)
# Print the JSON response for debugging
print(json.dumps(track_data, indent=2))
print(json.dumps(rich_sync_data, indent=2))
if rich_sync_data and 'richsync_body' in rich_sync_data:
rich_sync_lyrics_json = rich_sync_data['richsync_body']
lrc_lyrics = process_rich_sync_lyrics(rich_sync_lyrics_json)
return lrc_lyrics
else:
print("No synced lyrics found.")
return None
else:
print("Track not found in Musixmatch.")
return None
except (CaptchaError, UserTokenError) as e:
print(f"Error while working with Musixmatch: {e}")
return None
if __name__ == "__main__":
audio_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.mp3' # Replace this path
lrc_file_path = 'C:/Users/ILEG-i5-11/Downloads/Music/Unknown_file.lrc' # Output LRC file path
# Recognize the song using ACRCloud
song_tags = recognize_song(audio_file_path)
if song_tags:
artist_name = song_tags['artists'][0]['name']
song_title = song_tags['title']
print(f"Identified Song: {artist_name} - {song_title}")
# Fetch the synced lyrics (rich sync) using the recognized song information
lrc_lyrics = get_lyrics_from_musicxmatch(artist_name, song_title)
if lrc_lyrics:
# Write the LRC lyrics to a file
with open(lrc_file_path, 'w', encoding='utf-8') as lrc_file:
lrc_file.write(lrc_lyrics)
print(f"Saved LRC file to: {lrc_file_path}")
else:
print("Could not get the lyrics.")
else:
print("Could not identify the song.")
this code does identifying song and outputing lyrics right?
in place of unknown_file.lrc saving I need to save artist name - track - name in synchronised format
|
c5b9d6b9cb55c4411bf777711a543c00
|
{
"intermediate": 0.43380776047706604,
"beginner": 0.4277348518371582,
"expert": 0.13845735788345337
}
|
41,075
|
<s>[INST] <<SYS>>You are a student working in the Python programming domain. Below, I give you a target task and a student's misconception. You are going to play the role of the given student and synthesize a buggy program for the target task that reflects the misconception. Additionally, synthesize a correct solution program for the target task.
<</SYS>>
--- Target Task: Description in JSON format ---
{
"problem_name": "Merge two strings",
"problem_description": "Given two strings S1 and S2 as input, the task is to merge them alternatively i.e. the first character of S1 then the first character of S2 and so on till the strings end. NOTE: Add the whole string if other string is empty.",
"expected_time_complexity": "O(|S1| + |S2|)",
"expected_auxiliary_space": "O(1)",
"constraints": "1<=|S1|, |S2| <=10^3",
"number_of_buggy_programs": 10,
"source": "geeksforgeeks",
"problem_url":"https://practice.geeksforgeeks.org/problems/merge-two-strings2736/1?page=1",
"tests": "tests.json",
"your_task": "You don't need to read input or print anything. Your task is to complete the function merge() which takes the strings S1 and S2 as input and returns the resultant string by merging both S1 and S2 alternatively starting from S1."
}
--- Target Task: Student's misconception ---
There is a issue regarding the slicing of strings.
--- Target Task: Student's buggy program ---
--- Target Task: Correct solution program ---
[/INST]
|
0866f4e89d4b84dc2a3f6f564e375d62
|
{
"intermediate": 0.36005714535713196,
"beginner": 0.299048513174057,
"expert": 0.34089431166648865
}
|
41,076
|
import pandas as pd
import requests
from time import sleep
from fake_useragent import UserAgent
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import threading
# Initialize a user agent generator
ua = UserAgent()
# Lock object for thread safety when accessing the printed_events set
lock = threading.Lock()
# Set to store the events that have already been printed
printed_events = set()
def get_fake_headers():
return {
"User-Agent": ua.random,
"Accept-Encoding": "gzip, deflate",
"Accept": "application/json",
"Connection": "keep-alive"
}
def requests_retry_session(retries=5, backoff_factor=0.3, status_forcelist=(500, 502, 504)):
session = requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def fetch_live_event(fixture, players_df, teams_df, printed_events):
event_id = fixture['event']
live_url = f'https://fantasy.premierleague.com/api/event/{event_id}/live/'
try:
response = requests_retry_session().get(live_url, headers=get_fake_headers())
if response.status_code == 200:
live_data = response.json()['elements']
for element in live_data:
# Your code for processing the individual player’s live event data goes here…
pass # Replace with actual processing code
except Exception as e:
print(f"Error fetching live event data: {e}")
def fetch_live_fixtures():
fixtures_url = 'https://fantasy.premierleague.com/api/fixtures/'
try:
response = requests_retry_session().get(fixtures_url, headers=get_fake_headers())
if response.status_code == 200:
fixtures_data = response.json()
live_fixtures = [fixture for fixture in fixtures_data if not fixture['finished'] and fixture['started']]
players_url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
players_response = requests_retry_session().get(players_url, headers=get_fake_headers())
if players_response.status_code == 200:
players_data = players_response.json()
players_df = pd.DataFrame(players_data['elements'])
teams_df = pd.DataFrame(players_data['teams'])
threads = []
for fixture in live_fixtures:
thread = threading.Thread(target=fetch_live_event, args=(fixture, players_df, teams_df, printed_events))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
except Exception as e:
print(f"Error fetching fixtures data: {e}")
def main():
while True:
fetch_live_fixtures()
# Add a delay to avoid hitting API limits
sleep(10)
if __name__ == "__main__":
main() في هذا السكريبت ما الأحداث الذي يعرضها على سبيل المثال
|
5db4249950e083f8dd046c99a7c1a592
|
{
"intermediate": 0.4443039000034332,
"beginner": 0.4477272927761078,
"expert": 0.10796882957220078
}
|
41,077
|
can you translate some text in brazilian portuguese in a adapted form
|
631f3e53dfa494f890f329afcf7367fd
|
{
"intermediate": 0.3996133804321289,
"beginner": 0.2964123785495758,
"expert": 0.3039742410182953
}
|
41,078
|
import discord
from discord import Client, Intents
import pymongo
from pymongo import MongoClient
client = discord.Client()
cluster = MongoClient("")
db = cluster["Scores"]
collection = db["scoreCounter"]
wordstotestby = [
"word1",
"word2"
]
token = open("./botToken", "r").read()
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='MongoDB'))
change_presence 是做什么用的?
|
8830b0f19579d43e316cde0ee4e6b178
|
{
"intermediate": 0.38955333828926086,
"beginner": 0.4148261845111847,
"expert": 0.19562040269374847
}
|
41,079
|
sort 2 lists at the same time python
|
d335051b27afc0eca3fbb1c34b8b3702
|
{
"intermediate": 0.3017265498638153,
"beginner": 0.20718072354793549,
"expert": 0.491092711687088
}
|
41,080
|
optimise my soludtion "def lik(predictions2):
dataset = []
# Initialize dictionaries to store encountered object names, IDs, and object types
encountered_ids = set()
encountered_names = defaultdict(int)
encountered_object_types = defaultdict(int)
tencounterd_ids = 0 # Initialize tencounterd_ids variable
# Initialize a list to store the annotations
annotations = []
# Iterate over the predictions and create annotations
for result in predictions2:
for box in result[0].boxes:
cords = [round(coord) for coord in box.xyxy[0].tolist()]
conf = round(box.conf[0].item(), 2)
if box.id is None:
cls = int(box.cls)
name_object = result[0].names[cls]
tencounterd_ids += 1 # Increment tencounterd_ids
id_track = tencounterd_ids
if name_object in encountered_ids:
count = encountered_names[name_object] + 1
encountered_ids.add((name_object, count)) # Increment count and add to encountered_ids
continue
else :
count = 0
encountered_ids.add((name_object, count))
continue
else:
id_track = int(box.id)
cls = int(box.cls)
name_object = result[0].names[cls]
# Check if (name_object, id_track) combination is encountered
if (name_object, id_track) in encountered_ids:
continue
else:
encountered_ids.add((name_object, id_track))
# Update encountered names and counts
count = encountered_names[name_object] + 1
encountered_names[name_object] = count
name_object_with_count = f"{name_object}"
# Update encountered object types and counts
encountered_object_types[name_object] += 1
center_x = (cords[0] + cords[2]) / 2
center_y = (cords[1] + cords[3]) / 2
image_center_x = result[0].orig_shape[1] / 2
image_center_y = result[0].orig_shape[0] / 2
location_x = "left" if center_x < image_center_x else "right"
location_y = "above" if center_y < image_center_y else "below"
width = abs(cords[2] - cords[0])
height = abs(cords[3] - cords[1])
# Update or append annotation
annotation = next((a for a in annotations if a["object_type_name"] == name_object_with_count), None)
if annotation:
annotation["count"] += 1
else:
annotation = {
"object_type_name": name_object_with_count,
"count": 1,
"dimension": (width, height)
}
annotations.append(annotation)
return annotations, tencounterd_ids
|
9833b92951f683444be5bab8804cd1ae
|
{
"intermediate": 0.28154417872428894,
"beginner": 0.48420652747154236,
"expert": 0.23424923419952393
}
|
41,081
|
How can I pimprove the structure of this code
using System;
using System.Collections.Generic;
using Game.Systems.Buildings;
using Game.Systems.UI;
using TMPro;
using UnityEngine;
using UnityEngine.Serialization;
using UnityEngine.UI;
namespace Game.Systems
{
public enum BuildingPanelType
{
A,B,C,D
}
public enum ProductionType
{
Food, Wood
}
public class BuildingPanelManager : MonoBehaviour
{
[SerializeField] private PanelA panelA;
[SerializeField] private PanelB panelB;
[SerializeField] private PanelC panelC;
[SerializeField] private PanelD panelD;
private List<GameObject> panels = new List<GameObject>();
private BuildingPanelType currentPanel;
private RectTransform currentTransform;
private BuildingBase currentBuilding;
private void Start()
{
panels = new List<GameObject> { panelA.gameObject, panelB.gameObject, panelC.gameObject, panelD.gameObject};
DisableAllPanels();
}
private void Update()
{
CheckClickOutsidePanel();
}
private void CheckClickOutsidePanel()
{
if (Input.GetMouseButtonDown(0)) // Check for left click
{
if (currentTransform != null)
{
Vector2 localMousePosition = currentTransform.InverseTransformPoint(Input.mousePosition);
if (!currentTransform.rect.Contains(localMousePosition))
{
DisableAllPanels();
}
}
}
}
public void Init(BuildingPanelType type, string name, string description, BuildingBase buildingBase)
{
switch (type)
{
case BuildingPanelType.A:
DisableAllPanels();
buildingBase.showingPanel = true;
panelA.Init(name, description);
currentPanel = type;
currentTransform = panelA.area;
break;
case BuildingPanelType.B:
DisableAllPanels();
buildingBase.showingPanel = true;
panelB.Init(name, description);
currentPanel = type;
currentTransform = panelB.area;
break;
case BuildingPanelType.C:
DisableAllPanels();
buildingBase.showingPanel = true;
panelC.Init(name, description);
currentPanel = type;
currentTransform = panelC.area;
break;
}
currentBuilding = buildingBase;
}
public void Init(BuildingPanelType type, string name, string description, BuildingBase buildingBase, Sprite sprite)
{
switch (type)
{
case BuildingPanelType.D:
DisableAllPanels();
buildingBase.showingPanel = true;
panelD.Init(name, description);
panelD.SetSprite(sprite);
currentPanel = type;
currentTransform = panelD.area;
break;
}
currentBuilding = buildingBase;
}
public void UpdateHealth(BuildingBase sender, float health, float maxHealth)
{
if(sender != currentBuilding) return;
switch (currentPanel)
{
case BuildingPanelType.A:
panelA.UpdateHealth(health, maxHealth);
break;
case BuildingPanelType.B:
panelB.UpdateHealth(health, maxHealth);
break;
case BuildingPanelType.C:
panelC.UpdateHealth(health, maxHealth);
break;
case BuildingPanelType.D:
panelD.UpdateHealth(health, maxHealth);
break;
}
}
public void UpdateFixedText(string fixedText)
{
switch (currentPanel)
{
case BuildingPanelType.C:
panelC.UpdateFixedText(fixedText);
break;
}
}
public void UpdateProduction(BuildingBase sender, ProductionType type, bool isDay, float amount, float totalAmount, float percentage,
float remainingTime, int asignedVillagers, TMP_SpriteAsset tmpSpriteAsset = null)
{
if(sender != currentBuilding) return;
panelB.UpdateProduction(type, isDay, amount, totalAmount, percentage, remainingTime,
asignedVillagers, tmpSpriteAsset);
}
public void UpdateProduction(BuildingBase sender, int asignedVillagers, float boostPerVillager, TMP_SpriteAsset tmpSpriteAsset = null)
{
if(sender != currentBuilding) return;
panelC.UpdateProduction(asignedVillagers, boostPerVillager);
}
public void UpdateProduction(BuildingBase sender, bool isDay, float percentage, string pendingSoldiers)
{
if(sender != currentBuilding) return;
panelD.UpdateProduction(isDay, percentage, pendingSoldiers);
}
public void UpdateWorkers(BuildingBase sender, int villagersActive, int villagersAvailable)
{
if(sender != currentBuilding) return;
switch (currentPanel)
{
case BuildingPanelType.B:
panelB.UpdateWorkers(villagersActive, villagersAvailable);
break;
case BuildingPanelType.C:
panelC.UpdateWorkers(villagersActive, villagersAvailable);
break;
}
}
public void DisableAllPanels()
{
for (int i = 0; i < panels.Count; i++)
{
panels[i].SetActive(false);
}
if (currentBuilding != null)
{
currentBuilding.showingPanel = true;
currentBuilding.DisablePanel();
}
}
public void RemoveWorker()
{
if (currentBuilding != null && currentBuilding.assignedVillagers > 0)
{
currentBuilding.RemoveVillager();
GameManager.Instance.VillagersPlacer.AddVillagers(1);
}
}
public void DemolishBuilding()
{
if (currentBuilding != null)
{
currentBuilding.DemolishedAction();
DisableAllPanels();
}
}
}
}
|
a788f4268e1da2aee7ce76833dfee9fa
|
{
"intermediate": 0.42464813590049744,
"beginner": 0.3801940381526947,
"expert": 0.19515776634216309
}
|
41,082
|
write a script
|
e9c5bd062b4627304d0222fc2cfbc836
|
{
"intermediate": 0.2532443106174469,
"beginner": 0.4877426028251648,
"expert": 0.2590130567550659
}
|
41,083
|
#include <iostream>
#include <cmath>
#include <vector>
#include <algorithm>
// Recombination coefficients
double alphaB_HI(double T4) {
return 2.59e-13 * pow(T4, (-0.833 - 0.034 * log(T4))); // cm^3 s^-1
}
double alphaB_HeI(double T4) {
return 2.72e-13 * pow(T4, -0.789); // cm^3 s^-1
}
double alphaB_Halpha(double T4) {
return 1.17e-13 * pow(T4, (-0.942 - 0.031 * log(T4))); // cm^3 s^-1
}
double alphaB_Hbeta(double T4) {
return 3.03e-14 * pow(T4, (-0.874 - 0.058 * log(T4))); // cm^3 s^-1
}
double alpha1_HeI(double T4) {
return 1.54e-13 * pow(T4, -0.486); // cm^3 s^-1
}
double alphaA_HeI(double T4) {
return alphaB_HeI(T4) + alpha1_HeI(T4);
}
//#Osterbrock Table A5.1
//alphaB_OIII = 3.66*10**-12 #cm^3 s^-1
//alphaB_OII = 3.99*10**-13 #cm^3 s^-1
//RR: Badnel2006 https://iopscience.iop.org/article/10.1086/508465
//DR: Badnell,N.R.1986,J.Phys.B,19,3827. 2006a
//RR and DR summarized in Aaron Smith COLT bitbucket
//https://bitbucket.org/aaron_smith/colt/src/fb0cd32aeadaedce637a2df46780b1a71a1d3864/src/rates.h
//#########################Oxygen#########################################
double alpha_RR_OIII(double T4) {
double T = T4 * 10000;
double ST0 = sqrt(T / 0.1602);
double ST1 = sqrt(T / 4.377e6);
double Bp = 0.7668 + 0.107 * exp(-139200. / T);
return 2.096e-9 / (ST0 * pow((1 + ST0), (1 - Bp)) * pow((1 + ST1), (1 + Bp)));
}
double alpha_RR_OII(double T4) {
double T = T4 * 10000;
double ST0 = sqrt(T / 4.136);
double ST1 = sqrt(T / 4.214e6);
double Bp = 0.6109 + 0.4093 * exp(-87700. / T);
return 6.622e-11 / (ST0 * pow((1 + ST0), (1 - Bp)) * pow((1 + ST1), (1 + Bp)));
}
double alpha_DR_OIII(double T4) {
double T = T4 * 10000;
return pow(T, -1.5) * (1.627e-7 * exp(-45.35 / T) + 1.262e-7 * exp(-284.7 / T) + 6.663e-7 * exp(-4166. / T) + 3.925e-6 * exp(-28770. / T) + 0.002406 * exp(-195300. / T) + 0.001146 * exp(-364600. / T));
}
double alpha_DR_OII(double T4) {
double T = T4 * 10000;
return pow(T, -1.5) * (5.629e-8 * exp(-5395. / T) + 2.55e-7 * exp(-17700. / T) + 0.0006173 * exp(-167100. / T) + 0.0001627 * exp(-268700. / T));
}
double alphaB_OIII(double T4) {
return alpha_RR_OIII(T4) + alpha_DR_OIII(T4);
}
double alphaB_OII(double T4) {
return alpha_RR_OII(T4) + alpha_DR_OII(T4);
}
double delta_OII = 1.05e-9; // cm^3 s^-1
double delta_OI = 1.04e-9; // cm^3 s^-1
double k0_OI_ct(double T4) {
return 1.14e-9 * pow(T4, 0.4 + 0.018 * log(T4));
}
double k1_OI_ct(double T4) {
return 3.44e-10 * pow(T4, 0.451 + 0.036 * log(T4));
}
double k2_OI_ct(double T4) {
return 5.33e-10 * pow(T4, 0.384 + 0.024 * log(T4)) * exp(-97 / T4 / 10000);
}
double k0r_OI_ct(double T4) {
return 8.0 / 5.0 * k0_OI_ct(T4) * exp(-229 / T4 / 10000);
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for HI
std::vector<double> sigma_HI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 4.298e-1; // eV
const double sigma0 = 5.475e4; // Mb
const double ya = 3.288e1;
const double P = 2.963;
const double yw = 0.0;
const double y0 = 0.0;
const double y1 = 0.0;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 13.6 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants for H-alpha and H-beta frequencies
const double nu_Halpha = 1.89 * eV2J / h; // Hz
const double nu_Hbeta = 2.55 * eV2J / h; // Hz
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// States and constants for HeI
const int g0_HeI = 1;
const int g1_HeI = 1;
const int g2_HeI = 3;
const int g3_HeI = 3;
const double A30_HeI = 1.26e-4; // s^-1
// Interpolation functions for collisional coefficients
std::vector<double> T4_grid = {0.6000, 0.8000, 1.0000, 1.5000, 2.0000, 2.5000};
std::vector<double> k31_grid = {1.95e-8, 2.45e-8, 2.60e-8, 3.05e-8, 2.55e-8, 2.68e-8};
std::vector<double> k32_grid = {2.34e-9, 3.64e-9, 5.92e-9, 7.83e-9, 9.23e-9, 9.81e-9};
std::vector<double> T4_grid_E30 = {3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75};
std::vector<double> Omega03_grid = {6.198e-2, 6.458e-2, 6.387e-2, 6.157e-2, 5.832e-2, 5.320e-2, 4.787e-2, 4.018e-2, 3.167e-2};
double k31_HeI(double T4) {
auto it = std::upper_bound(T4_grid.begin(), T4_grid.end(), T4);
int idx = std::distance(T4_grid.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid.size()) idx = T4_grid.size() - 1;
double k31 = k31_grid[idx - 1] + (T4 - T4_grid[idx - 1]) / (T4_grid[idx] - T4_grid[idx - 1]) * (k31_grid[idx] - k31_grid[idx - 1]);
return k31;
}
double k32_HeI(double T4) {
auto it = std::upper_bound(T4_grid.begin(), T4_grid.end(), T4);
int idx = std::distance(T4_grid.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid.size()) idx = T4_grid.size() - 1;
double k32 = k32_grid[idx - 1] + (T4 - T4_grid[idx - 1]) / (T4_grid[idx] - T4_grid[idx - 1]) * (k32_grid[idx] - k32_grid[idx - 1]);
return k32;
}
double k30_HeI(double T4) {
auto it = std::upper_bound(T4_grid_E30.begin(), T4_grid_E30.end(), T4);
int idx = std::distance(T4_grid_E30.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid_E30.size()) idx = T4_grid_E30.size() - 1;
double Omega03 = Omega03_grid[idx - 1] + (T4 - T4_grid_E30[idx - 1]) / (T4_grid_E30[idx] - T4_grid_E30[idx - 1]) * (Omega03_grid[idx] - Omega03_grid[idx - 1]);
return 8.629e-8 / std::sqrt(T4) * Omega03 / g3_HeI;
}
// Fraction of recombination radiation resulting in hydrogen ionization
double p(double ne, double T4) {
double numerator = 0.75 * A30_HeI;
double denominator = A30_HeI + ne * (k30_HeI(T4) + k31_HeI(T4) + k32_HeI(T4));
double p_value = numerator / denominator + 0.25 * 2 / 3 + 0.75 * ne * k32_HeI(T4) / denominator;
p_value += (0.75 * ne * k31_HeI(T4) / denominator + 0.25 * 1 / 3) * 0.56;
return p_value;
}
// Photoionization cross section for HeI
std::vector<double> sigma_HeI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 13.61; // eV
const double sigma0 = 949.2; // Mb
const double ya = 1.469;
const double P = 3.188;
const double yw = 2.039;
const double y0 = 0.4434;
const double y1 = 2.136;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 24.59 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for HeII
std::vector<double> sigma_HeII(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 1.72; // eV
const double sigma0 = 1.369e4; // Mb
const double ya = 32.88;
const double P = 2.963;
const double yw = 0.0;
const double y0 = 0.0;
const double y1 = 0.0;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 54.42 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double kb = 1.380649e-23; // Boltzmann constant (J/K)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for OI
std::vector<double> sigma_OI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 1.240; // eV
const double sigma0 = 1.745e3; // Mb
const double ya = 3.784;
const double P = 17.64;
const double yw = 7.589e-2;
const double y0 = 8.698;
const double y1 = 1.271e-1;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] * 6.242e18;
// Check energy range
if (E < 13.62 || E > 538) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Other functions and constants for OII section
// Define state degeneracy constants for OII
const int g0_OII = 4;
const int g1_OII = 6;
const int g2_OII = 4;
const int g3_OII = 4;
const int g4_OII = 2;
// Define spontaneous decay rate constants for OII
const double A10_OII = 7.416e-06 + 3.382e-05;
const double A20_OII = 1.414e-04 + 2.209e-05;
const double A21_OII = 1.30e-07 + 1.49e-20;
const double A30_OII = 5.22e-02 + 2.43e-07;
const double A31_OII = 8.37e-03 + 9.07e-02;
const double A32_OII = 1.49e-02 + 3.85e-02;
const double A40_OII = 2.12e-02 + 3.72e-07;
const double A41_OII = 8.34e-03 + 5.19e-02;
const double A42_OII = 9.32e-03 + 7.74e-02;
const double A43_OII = 1.41e-10 + 4.24e-24;
// Level energy constants for OII
const double E10_OII = 38575 * kb; // J
const double E20_OII = 38604 * kb;
const double E30_OII = 58225 * kb;
const double E40_OII = 58228 * kb;
const double E21_OII = E20_OII - E10_OII;
const double E31_OII = E30_OII - E10_OII;
const double E32_OII = E30_OII - E20_OII;
const double E41_OII = E40_OII - E10_OII;
const double E42_OII = E40_OII - E20_OII;
const double E43_OII = E40_OII - E30_OII;
// Level energy frequency constants for OII
const double nu10_OII = E10_OII / h; // Hz
const double nu20_OII = E20_OII / h;
const double nu21_OII = E21_OII / h;
const double nu30_OII = E30_OII / h;
const double nu31_OII = E31_OII / h;
const double nu32_OII = E32_OII / h;
const double nu40_OII = E40_OII / h;
const double nu41_OII = E41_OII / h;
const double nu42_OII = E42_OII / h;
const double nu43_OII = E43_OII / h;
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double kb = 1.380649e-23; // Boltzmann constant (J/K)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Collisional (de-)excitation coefficients for OII
double Omega10_OII(double T4) {
return 0.803 * pow(T4, 0.023 - 0.008 * log(T4));
}
double k10_OII(double T4, int g1_OII) {
return 8.629e-8 / sqrt(T4) * Omega10_OII(T4) / g1_OII; // cm^3 s^-1
}
double k01_OII(double T4, int g1_OII, int g0_OII, double E10_OII) {
return g1_OII / g0_OII * k10_OII(T4, g1_OII) * exp(-E10_OII / (kb * T4 * 1e4));
}
double Omega20_OII(double T4) {
return 0.550 * pow(T4, 0.054 - 0.004 * log(T4));
}
double k20_OII(double T4, int g2_OII) {
return 8.629e-8 / sqrt(T4) * Omega20_OII(T4) / g2_OII; // cm^3 s^-1
}
double k02_OII(double T4, int g2_OII, int g0_OII, double E20_OII) {
return g2_OII / g0_OII * k20_OII(T4, g2_OII) * exp(-E20_OII / (kb * T4 * 1e4));
}
// Define Omega21_OII, k21_OII, k12_OII in a similar manner
double Omega30_OII(double T4) {
return 0.140 * pow(T4, 0.025 - 0.006 * log(T4));
}
double k30_OII(double T4, int g3_OII) {
return 8.629e-8 / sqrt(T4) * Omega30_OII(T4) / g3_OII; // cm^3 s^-1
}
double k03_OII(double T4, int g3_OII, int g0_OII, double E30_OII) {
return g3_OII / g0_OII * k30_OII(T4, g3_OII) * exp(-E30_OII / (kb * T4 * 1e4));
}
// Define Omega31_OII
double Omega31_OII(double T4) {
return 0.349 * pow(T4, 0.060 + 0.052 * log(T4));
}
// Define k31_OII
double k31_OII(double T4, int g3_OII) {
return 8.629e-8 / sqrt(T4) * Omega31_OII(T4) / g3_OII; // cm^3 s^-1
}
// Define k13_OII
double k13_OII(double T4, int g3_OII, int g1_OII, double E31_OII) {
return g3_OII / g1_OII * k31_OII(T4, g3_OII) * exp(-E31_OII / (kb * T4 * 1e4));
}
// Define Omega32_OII
double Omega32_OII(double T4) {
return 0.326 * pow(T4, 0.063 + 0.052 * log(T4));
}
// Define k32_OII
double k32_OII(double T4, int g3_OII, int g2_OII, double E32_OII) {
return 8.629e-8 / sqrt(T4) * Omega32_OII(T4) / g3_OII; // cm^3 s^-1
}
// Define k23_OII
double k23_OII(double T4, int g3_OII, int g2_OII, double E32_OII) {
return g3_OII / g2_OII * k32_OII(T4, g3_OII, g2_OII, E32_OII) * exp(-E32_OII / (kb * T4 * 1e4));
}
// Define Omega40_OII
double Omega40_OII(double T4) {
return 0.283 * pow(T4, 0.023 - 0.004 * log(T4));
}
// Define k40_OII
double k40_OII(double T4, int g4_OII) {
return 8.629e-8 / sqrt(T4) * Omega40_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k04_OII
double k04_OII(double T4, int g4_OII, int g0_OII, double E40_OII) {
return g4_OII / g0_OII * k40_OII(T4, g4_OII) * exp(-E40_OII / (kb * T4 * 1e4));
}
// Define Omega41_OII
double Omega41_OII(double T4) {
return 0.832 * pow(T4, 0.076 + 0.055 * log(T4));
}
// Define k41_OII
double k41_OII(double T4, int g4_OII, int g1_OII, double E41_OII) {
return 8.629e-8 / sqrt(T4) * Omega41_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k14_OII
double k14_OII(double T4, int g4_OII, int g1_OII, double E41_OII) {
return g4_OII / g1_OII * k41_OII(T4, g4_OII, g1_OII, E41_OII) * exp(-E41_OII / (kb * T4 * 1e4));
}
// Define Omega42_OII
double Omega42_OII(double T4) {
return 0.485 * pow(T4, 0.059 + 0.052 * log(T4));
}
// Define k42_OII
double k42_OII(double T4, int g4_OII, int g2_OII, double E42_OII) {
return 8.629e-8 / sqrt(T4) * Omega42_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k24_OII
double k24_OII(double T4, int g4_OII, int g2_OII, double E42_OII) {
return g4_OII / g2_OII * k42_OII(T4, g4_OII, g2_OII, E42_OII) * exp(-E42_OII / (kb * T4 * 1e4));
}
// Define Omega43_OII
double Omega43_OII(double T4) {
return 0.322 * pow(T4, 0.019 + 0.037 * log(T4));
}
// Define k43_OII
double k43_OII(double T4, int g4_OII, int g3_OII, double E43_OII) {
return 8.629e-8 / sqrt(T4) * Omega43_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k34_OII
double k34_OII(double T4, int g4_OII, int g3_OII, double E43_OII) {
return g4_OII / g3_OII * k43_OII(T4, g4_OII, g3_OII, E43_OII) * exp(-E43_OII / (kb * T4 * 1e4));
}
// Define R01_OII
double R01_OII(double ne, double T4) {
return ne * k01_OII(T4);
}
// Define R02_OII
double R02_OII(double ne, double T4) {
return ne * k02_OII(T4);
}
// Define R03_OII
double R03_OII(double ne, double T4) {
return ne * k03_OII(T4);
}
// Define R04_OII
double R04_OII(double ne, double T4) {
return ne * k04_OII(T4);
}
// Define R10_OII
double R10_OII(double ne, double T4) {
return ne * k10_OII(T4) + A10_OII;
}
// Define R12_OII
double R12_OII(double ne, double T4) {
return ne * k12_OII(T4);
}
// Define R13_OII
double R13_OII(double ne, double T4) {
return ne * k13_OII(T4);
}
// Define R14_OII
double R14_OII(double ne, double T4) {
return ne * k14_OII(T4);
}
// Define R20_OII
double R20_OII(double ne, double T4) {
return ne * k20_OII(T4) + A20_OII;
}
// Define R21_OII
double R21_OII(double ne, double T4) {
return ne * k21_OII(T4) + A21_OII;
}
// Define R23_OII
double R23_OII(double ne, double T4) {
return ne * k23_OII(T4);
}
// Define R24_OII
double R24_OII(double ne, double T4) {
return ne * k24_OII(T4);
}
// Define R30_OII
double R30_OII(double ne, double T4) {
return ne * k30_OII(T4) + A30_OII;
}
// Define R31_OII
double R31_OII(double ne, double T4) {
return ne * k31_OII(T4) + A31_OII;
}
// Define R32_OII
double R32_OII(double ne, double T4) {
return ne * k32_OII(T4) + A32_OII;
}
// Define R34_OII
double R34_OII(double ne, double T4) {
return ne * k34_OII(T4);
}
// Define R40_OII
double R40_OII(double ne, double T4) {
return ne * k40_OII(T4) + A40_OII;
}
// Define R41_OII
double R41_OII(double ne, double T4) {
return ne * k41_OII(T4) + A41_OII;
}
// Define R42_OII
double R42_OII(double ne, double T4) {
return ne * k42_OII(T4) + A42_OII;
}
// Define R43_OII
double R43_OII(double ne, double T4) {
return ne * k43_OII(T4) + A43_OII;
}
//Photoionization cross section
// Define constants
const double h = 6.626e-34; // Planck constant in J s
const double kb = 1.381e-23; // Boltzmann constant in J/K
// Define sigma_OII function
std::vector<double> sigma_OII(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Define energy range
const double E_min = 35.12 * h * 6.242e18; // eV
const double E_max = 558.1 * h * 6.242e18; // eV
// Iterate over frequency array
for (size_t i = 0; i < nu.size(); ++i) {
double E = h * nu[i] * 6.242e18; // eV
// Check if energy is within valid range
if (E >= E_min && E <= E_max) {
// Calculate cross section
double E0 = 1.386; // eV
double sigma0 = 5.967 * 10; // Mb
double ya = 3.175 * 10;
double P = 8.943;
double yw = 1.934e-2;
double y0 = 2.131 * 10;
double y1 = 1.503e-2;
double x = E / E0 - y0;
double y_val = std::sqrt(x * x + y1 * y1);
// Calculate cross section using given formula
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y_val, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y_val / ya), -P) * 1e-18;
}
}
return sigma;
}
// OIII data
// State degeneracy
const int g0_OIII = 1;
const int g1_OIII = 3;
const int g2_OIII = 5;
const int g3_OIII = 5;
const int g4_OIII = 1;
// Spontaneous decay rate (s^-1)
const double A10_OIII = 2.6e-5;
const double A20_OIII = 3.5e-11;
const double A21_OIII = 9.8e-5;
const double A30_OIII = 1.9e-6;
const double A31_OIII = 0.0071;
const double A32_OIII = 0.021;
const double A40_OIII = 0;
const double A41_OIII = 0.23;
const double A42_OIII = 7.1e-4;
const double A43_OIII = 1.6;
// Level energy and frequency (in J and Hz respectively)
const double E10_OIII = 163 * kb;
const double E20_OIII = 441 * kb;
const double E30_OIII = 29169 * kb;
const double E40_OIII = 61207 * kb;
const double E21_OIII = E20_OIII - E10_OIII;
const double E31_OIII = E30_OIII - E10_OIII;
const double E32_OIII = E30_OIII - E20_OIII;
const double E41_OIII = E40_OIII - E10_OIII;
const double E42_OIII = E40_OIII - E20_OIII;
const double E43_OIII = E40_OIII - E30_OIII;
const double nu10_OIII = E10_OIII / h;
const double nu20_OIII = E20_OIII / h;
const double nu30_OIII = E30_OIII / h;
const double nu40_OIII = E40_OIII / h;
const double nu21_OIII = E21_OIII / h;
const double nu31_OIII = E31_OIII / h;
const double nu32_OIII = E32_OIII / h;
const double nu41_OIII = E41_OIII / h;
const double nu42_OIII = E42_OIII / h;
const double nu43_OIII = E43_OIII / h;
// OIII collisional (de-)excitation coefficients
// Omega and k functions
auto Omega10_OIII = [](double T4) { return 0.522 * pow(T4, (0.033 - 0.009 * log(T4))); };
auto k10_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega10_OIII(T4) / g1_OIII; };
auto k01_OIII = [](double T4) { return g1_OIII / g0_OIII * k10_OIII(T4) * exp(-E10_OIII / (kb * T4) / 10000); };
auto Omega20_OIII = [](double T4) { return 0.257 * pow(T4, (0.081 + 0.017 * log(T4))); };
auto k20_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega20_OIII(T4) / g2_OIII; };
auto k02_OIII = [](double T4) { return g2_OIII / g0_OIII * k20_OIII(T4) * exp(-E20_OIII / (kb * T4) / 10000); };
auto Omega21_OIII = [](double T4) { return 1.23 * pow(T4, (0.053 + 0.007 * log(T4))); };
auto k21_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega21_OIII(T4) / g2_OIII; };
auto k12_OIII = [](double T4) { return g2_OIII / g1_OIII * k21_OIII(T4) * exp(-E21_OIII / (kb * T4) / 10000); };
auto Omega30_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))); };
auto k30_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega30_OIII(T4) / g3_OIII; };
auto k03_OIII = [](double T4) { return g3_OIII / g0_OIII * k30_OIII(T4) * exp(-E30_OIII / (kb * T4) / 10000); };
auto Omega31_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))) * 3; };
auto k31_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega31_OIII(T4) / g3_OIII; };
auto k13_OIII = [](double T4) { return g3_OIII / g1_OIII * k31_OIII(T4) * exp(-E31_OIII / (kb * T4) / 10000); };
auto Omega32_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))) * 5; };
auto k32_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega32_OIII(T4) / g3_OIII; };
auto k23_OIII = [](double T4) { return g3_OIII / g2_OIII * k32_OIII(T4) * exp(-E32_OIII / (kb * T4) / 10000); };
auto Omega40_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))); };
auto k40_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega40_OIII(T4) / g4_OIII; };
auto k04_OIII = [](double T4) { return g4_OIII / g0_OIII * k40_OIII(T4) * exp(-E40_OIII / (kb * T4) / 10000); };
auto Omega41_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))) * 3; };
auto k41_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega41_OIII(T4) / g4_OIII; };
auto k14_OIII = [](double T4) { return g4_OIII / g1_OIII * k41_OIII(T4) * exp(-E41_OIII / (kb * T4) / 10000); };
auto Omega42_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))) * 5; };
auto k42_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega42_OIII(T4) / g4_OIII; };
auto k24_OIII = [](double T4) { return g4_OIII / g2_OIII * k42_OIII(T4) * exp(-E42_OIII / (kb * T4) / 10000); };
auto Omega43_OIII = [](double T4) { return 0.523 * pow(T4, (0.210 - 0.099 * log(T4))); };
auto k43_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega43_OIII(T4) / g4_OIII; };
auto k34_OIII = [](double T4) { return g4_OIII / g3_OIII * k43_OIII(T4) * exp(-E43_OIII / (kb * T4) / 10000); };
// Five level rates for OIII
auto R01_OIII = [&](double ne, double T4) { return ne * k01_OIII(T4); };
auto R02_OIII = [&](double ne, double T4) { return ne * k02_OIII(T4); };
auto R03_OIII = [&](double ne, double T4) { return ne * k03_OIII(T4); };
auto R04_OIII = [&](double ne, double T4) { return ne * k04_OIII(T4); };
auto R10_OIII = [&](double ne, double T4) { return ne * k10_OIII(T4) + A10_OIII; };
auto R12_OIII = [&](double ne, double T4) { return ne * k12_OIII(T4); };
auto R13_OIII = [&](double ne, double T4) { return ne * k13_OIII(T4); };
auto R14_OIII = [&](double ne, double T4) { return ne * k14_OIII(T4); };
auto R20_OIII = [&](double ne, double T4) { return ne * k20_OIII(T4) + A20_OIII; };
auto R21_OIII = [&](double ne, double T4) { return ne * k21_OIII(T4) + A21_OIII; };
auto R23_OIII = [&](double ne, double T4) { return ne * k23_OIII(T4); };
auto R24_OIII = [&](double ne, double T4) { return ne * k24_OIII(T4); };
auto R30_OIII = [&](double ne, double T4) { return ne * k30_OIII(T4) + A30_OIII; };
auto R31_OIII = [&](double ne, double T4) { return ne * k31_OIII(T4) + A31_OIII; };
auto R32_OIII = [&](double ne, double T4) { return ne * k32_OIII(T4) + A32_OIII; };
auto R34_OIII = [&](double ne, double T4) { return ne * k34_OIII(T4); };
auto R40_OIII = [&](double ne, double T4) { return ne * k40_OIII(T4) + A40_OIII; };
auto R41_OIII = [&](double ne, double T4) { return ne * k41_OIII(T4) + A41_OIII; };
auto R42_OIII = [&](double ne, double T4) { return ne * k42_OIII(T4) + A42_OIII; };
auto R43_OIII = [&](double ne, double T4) { return ne * k43_OIII(T4) + A43_OIII; };
// NII parameters
// State degeneracy
int g0_NII = 1;
int g1_NII = 3;
int g2_NII = 5;
int g3_NII = 5;
int g4_NII = 1;
// Spontaneous decay rates (s^-1)
double A10_NII = 2.08e-6;
double A20_NII = 1.12e-12;
double A21_NII = 7.46e-6;
double A30_NII = 5.25e-7;
double A31_NII = 9.22e-7 + 9.84e-4;
double A32_NII = 8.65e-6 + 2.91e-3;
double A40_NII = 0;
double A41_NII = 3.18e-2;
double A42_NII = 1.55e-4;
double A43_NII = 1.14;
// Level energy and frequency (J and s^-1)
double E10_NII = 70 * kb;
double E20_NII = 188 * kb;
double E30_NII = 22037 * kb;
double E40_NII = 47033 * kb;
double E21_NII = E20_NII - E10_NII;
double E31_NII = E30_NII - E10_NII;
double E32_NII = E30_NII - E20_NII;
double E41_NII = E40_NII - E10_NII;
double E42_NII = E40_NII - E20_NII;
double E43_NII = E40_NII - E30_NII;
double nu10_NII = E10_NII / h;
double nu20_NII = E20_NII / h;
double nu30_NII = E30_NII / h;
double nu40_NII = E40_NII / h;
double nu21_NII = E21_NII / h;
double nu31_NII = E31_NII / h;
double nu32_NII = E32_NII / h;
double nu41_NII = E41_NII / h;
double nu42_NII = E42_NII / h;
double nu43_NII = E43_NII / h;
// Collisional (de-)excitation coefficients for NII
// Omega functions
double Omega10_NII(double T4) { return 0.431 * pow(T4, 0.099 + 0.014 * log(T4)); }
double Omega20_NII(double T4) { return 0.273 * pow(T4, 0.166 + 0.030 * log(T4)); }
double Omega21_NII(double T4) { return 1.15 * pow(T4, 0.137 + 0.024 * log(T4)); }
double Omega30_NII(double T4) { return 0.303 * pow(T4, 0.053 + 0.009 * log(T4)); }
double Omega31_NII(double T4) { return 0.909 * pow(T4, 0.053 + 0.010 * log(T4)); }
double Omega32_NII(double T4) { return 1.51 * pow(T4, 0.054 + 0.011 * log(T4)); }
double Omega40_NII(double T4) { return 0.0352 * pow(T4, 0.066 + 0.018 * log(T4)); }
double Omega41_NII(double T4) { return 0.105 * pow(T4, 0.070 + 0.021 * log(T4)); }
double Omega42_NII(double T4) { return 0.176 * pow(T4, 0.065 + 0.017 * log(T4)); }
double Omega43_NII(double T4) { return 0.806 * pow(T4, -0.175 - 0.014 * log(T4)); }
// Rate coefficients
double k10_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega10_NII(T4) / g1_NII; }
double k01_NII(double T4) { return g1_NII / g0_NII * k10_NII(T4) * exp(-E10_NII / (kb * T4) / 10000); }
double k20_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega20_NII(T4) / g2_NII; }
double k02_NII(double T4) { return g2_NII / g0_NII * k20_NII(T4) * exp(-E20_NII / (kb * T4) / 10000); }
double k21_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega21_NII(T4) / g2_NII; }
double k12_NII(double T4) { return g2_NII / g1_NII * k21_NII(T4) * exp(-E21_NII / (kb * T4) / 10000); }
double k30_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega30_NII(T4) / g3_NII; }
double k03_NII(double T4) { return g3_NII / g0_NII * k30_NII(T4) * exp(-E30_NII / (kb * T4) / 10000); }
double k31_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega31_NII(T4) / g3_NII; }
double k13_NII(double T4) { return g3_NII / g1_NII * k31_NII(T4) * exp(-E31_NII / (kb * T4) / 10000); }
double k32_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega32_NII(T4) / g3_NII; }
double k23_NII(double T4) { return g3_NII / g2_NII * k32_NII(T4) * exp(-E32_NII / (kb * T4) / 10000); }
double k40_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega40_NII(T4) / g4_NII; }
double k04_NII(double T4) { return g4_NII / g0_NII * k40_NII(T4) * exp(-E40_NII / (kb * T4) / 10000); }
double k41_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega41_NII(T4) / g4_NII; }
double k14_NII(double T4) { return g4_NII / g1_NII * k41_NII(T4) * exp(-E41_NII / (kb * T4) / 10000); }
double k42_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega42_NII(T4) / g4_NII; }
double k24_NII(double T4) { return g4_NII / g2_NII * k42_NII(T4) * exp(-E42_NII / (kb * T4) / 10000); }
double k43_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega43_NII(T4) / g4_NII; }
double k34_NII(double T4) { return g4_NII / g3_NII * k43_NII(T4) * exp(-E43_NII / (kb * T4) / 10000); }
// Five-level rates for NII
double R01_NII(double ne, double T4) { return ne * k01_NII(T4); }
double R02_NII(double ne, double T4) { return ne * k02_NII(T4); }
double R03_NII(double ne, double T4) { return ne * k03_NII(T4); }
double R04_NII(double ne, double T4) { return ne * k04_NII(T4); }
double R10_NII(double ne, double T4) { return ne * k10_NII(T4) + A10_NII; }
double R12_NII(double ne, double T4) { return ne * k12_NII(T4); }
double R13_NII(double ne, double T4) { return ne * k13_NII(T4); }
double R14_NII(double ne, double T4) { return ne * k14_NII(T4); }
double R20_NII(double ne, double T4) { return ne * k20_NII(T4) + A20_NII; }
double R21_NII(double ne, double T4) { return ne * k21_NII(T4) + A21_NII; }
double R23_NII(double ne, double T4) { return ne * k23_NII(T4); }
double R24_NII(double ne, double T4) { return ne * k24_NII(T4); }
double R30_NII(double ne, double T4) { return ne * k30_NII(T4) + A30_NII; }
double R31_NII(double ne, double T4) { return ne * k31_NII(T4) + A31_NII; }
double R32_NII(double ne, double T4) { return ne * k32_NII(T4) + A32_NII; }
double R34_NII(double ne, double T4) { return ne * k34_NII(T4); }
double R40_NII(double ne, double T4) { return ne * k40_NII(T4) + A40_NII; }
double R41_NII(double ne, double T4) { return ne * k41_NII(T4) + A41_NII; }
double R42_NII(double ne, double T4) { return ne * k42_NII(T4) + A42_NII; }
double R43_NII(double ne, double T4) { return ne * k43_NII(T4) + A43_NII; }
// Cross section for NI
std::vector<double> sigma_NI(const std::vector<double>& nu) {
std::vector<double> sigma(nu.size(), 0.0);
const double E0 = 4.034; // eV
const double sigma0 = 8.235 * 100; // Mb
const double ya = 8.033 * 10;
const double P = 3.928;
const double yw = 9.097 * pow(10, -2);
const double y0 = 8.598 * pow(10, -1);
const double y1 = 2.325;
for (size_t i = 0; i < nu.size(); ++i) {
double E = h * nu[i] * 6.242 * pow(10, 18);
if (E >= 14.53 && E <= 404.8) {
double x = E / E0 - y0;
double y = sqrt(pow(x, 2) + pow(y1, 2));
sigma[i] = sigma0 * (pow(x - 1, 2) + pow(yw, 2)) * pow(y, 0.5 * P - 5.5) * pow(1 + sqrt(y / ya), -P) * pow(10, -18); // cm^2
}
}
return sigma;
}
// Cross section for NII
std::vector<double> sigma_NII(const std::vector<double>& nu) {
std::vector<double> sigma(nu.size(), 0.0);
const double E0_NII = 6.128 * pow(10, -2); // eV
const double sigma0_NII = 1.944; // Mb
const double ya_NII = 8.163 * pow(10, 2);
const double P_NII = 8.773;
const double yw_NII = 1.043 * pow(10, 1);
const double y0_NII = 4.280 * pow(10, 2);
const double y1_NII = 2.030 * pow(10, 1);
for (size_t i = 0; i < nu.size(); ++i) {
double E_NII = h * nu[i] * 6.242 * pow(10, 18);
if (E_NII >= 29.6 && E_NII <= 423.6) {
double x_NII = E_NII / E0_NII - y0_NII;
double y_NII = sqrt(pow(x_NII, 2) + pow(y1_NII, 2));
sigma[i] = sigma0_NII * (pow(x_NII - 1, 2) + pow(yw_NII, 2)) * pow(y_NII, 0.5 * P_NII - 5.5) * pow(1 + sqrt(y_NII / ya_NII), -P_NII) * pow(10, -18); // cm^2
}
}
return sigma;
}
// Cross section for NIII
std::vector<double> sigma_NIII(const std::vector<double>& nu) {
std::vector<double> sigma(nu.size(), 0.0);
const double E0_NIII = 0.2420; // eV
const double sigma0_NIII = 0.9375; // Mb
const double ya_NIII = 278.8;
const double P_NIII = 9.156;
const double yw_NIII = 1.850;
const double y0_NIII = 187.7;
const double y1_NIII = 3.999;
for (size_t i = 0; i < nu.size(); ++i) {
double E_NIII = h * nu[i] * 6.242 * pow(10, 18);
if (E_NIII >= 47.45 && E_NIII <= 447.3) {
double x_NIII = E_NIII / E0_NIII - y0_NIII;
double y_NIII = sqrt(pow(x_NIII, 2) + pow(y1_NIII, 2));
sigma[i] = sigma0_NIII * (pow(x_NIII - 1, 2) + pow(yw_NIII, 2)) * pow(y_NIII, 0.5 * P_NIII - 5.5) * pow(1 + sqrt(y_NIII / ya_NIII), -P_NIII) * pow(10, -18); // cm^2
}
}
return sigma;
}
create a main funtion for all this script
|
0febb1a557da4e869897ee2b494698ba
|
{
"intermediate": 0.20861786603927612,
"beginner": 0.4287492036819458,
"expert": 0.36263296008110046
}
|
41,084
|
Introduction to metadata tagging and instrument separation
|
e1f7a066ee9c0fa8422e04b05c4628de
|
{
"intermediate": 0.2932806611061096,
"beginner": 0.40020552277565,
"expert": 0.30651384592056274
}
|
41,085
|
ask me a regex question, medium hardness give me string to match
|
ca6fed338004bc62b2b1f750ffcbf36b
|
{
"intermediate": 0.3085944652557373,
"beginner": 0.45791178941726685,
"expert": 0.23349370062351227
}
|
41,086
|
Make this Python code able to accept parallel connections using threads:
server.py:
import socket
# Define host and port for the server
HOST = ‘127.0.0.1’
PORT = 65432
# The very basic “database” of username and password
VALID_USERNAME = “user”
VALID_PASSWORD = “pass”
# Helper function to check credentials
def check_credentials(username, password):
return username == VALID_USERNAME and password == VALID_PASSWORD
# Create socket with IPv4 and TCP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print(f"Server listening on {HOST}:{PORT}“)
conn, addr = s.accept()
with conn:
print(‘Connected by’, addr)
while True:
# Authentication
username = conn.recv(1024).decode()
password = conn.recv(1024).decode()
if not check_credentials(username, password):
conn.sendall(b"Authentication failed.”)
break
else:
conn.sendall(b"Authentication successful.“)
# Main data receiving loop
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
|
7a65a8e503f6d56829be1c8b2b7ef05c
|
{
"intermediate": 0.44066473841667175,
"beginner": 0.43063342571258545,
"expert": 0.12870179116725922
}
|
41,087
|
# Group by ‘unique_id’ and calculate the length of each group
lengths = y_cl4.groupby('unique_id').agg(pl.count().alias('length'))
# Count the occurrences of each length
counts = lengths.groupby('length').agg(pl.count().alias('count'))
counts = counts.sort('length')
pl.Config.set_tbl_rows(200)
print(counts) # Filter the lengths DataFrame for lengths greater than 31
lengths_filtered = lengths.filter(pl.col('length') > 31)
# y_soldto filtered with only values greater than 31 in length
y_cl4_filtered = y_cl4.join(
lengths_filtered.select(pl.col('unique_id')),
on='unique_id',
how='semi'
)
# Sort by 'WeekDate'
y_cl4_filtered = y_cl4_filtered.sort("ds")
print(y_cl4_filtered)
# Group by ‘unique_id’ and calculate the length of each group
lengths = y_cl4_filtered.groupby('unique_id').agg(pl.count().alias('length'))
# Count the occurrences of each length
counts = lengths.groupby('length').agg(pl.count().alias('count'))
counts = counts.sort('length')
pl.Config.set_tbl_rows(200)
print(counts) # For dataset with length > 8
lengths_1 = y_cl4.groupby('unique_id').agg(pl.count().alias('length'))
lengths_fit_1 = lengths_1.filter(pl.col('length') > 15)
y_cl4_fit_1 = y_cl4.join(
lengths_fit_1.select(pl.col('unique_id')),
on='unique_id',
how='semi'
)
# Remove this series due to it having only 0 values
y_cl4_fit_1 = y_cl4_fit_1.filter(pl.col('unique_id') != '22028504_US01_8_1108389') # Sort, group, and count the lengths of data sequences per unique_id
y_cl4_fit_1 = y_cl4_fit_1.sort("ds")
lengths_1 = y_cl4_fit_1.groupby('unique_id').agg(pl.count().alias('length'))
counts_1 = lengths_1.groupby('length').agg(pl.count().alias('count'))
counts_1 = counts_1.sort('length')
pl.Config.set_tbl_rows(200)
print(counts_1) is there way to have these blocks of code cleaner? basically first block showing all avaialble length for series which is necessary, the seocnd block is only include series 32 lenghs ang higher, can you rename it to something_over 32, third block is using the first block but only include 15 lengths and higher, can you rename this something_over15, last block is to check to make sure dataset over 15 include 15 lengtths and higher, it seems. a lot of lines of code repeated so just wanted them clean up
|
5ec80a0eedd7f0b05cacedb49fbfb587
|
{
"intermediate": 0.27771130204200745,
"beginner": 0.4871826767921448,
"expert": 0.23510603606700897
}
|
41,088
|
I have simple Python socket server. How to check client IP address for blacklist before connection accept ? If IP address blacklisted reject connection
server.py:
import socket
# Define host and port for the server
HOST = ‘127.0.0.1’
PORT = 65432
# The very basic “database” of username and password
VALID_USERNAME = “user”
VALID_PASSWORD = “pass”
# Helper function to check credentials
def check_credentials(username, password):
return username == VALID_USERNAME and password == VALID_PASSWORD
# Create socket with IPv4 and TCP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print(f"Server listening on {HOST}:{PORT}“)
conn, addr = s.accept()
with conn:
print(‘Connected by’, addr)
while True:
# Authentication
username = conn.recv(1024).decode()
password = conn.recv(1024).decode()
if not check_credentials(username, password):
conn.sendall(b"Authentication failed.”)
break
else:
conn.sendall(b"Authentication successful.“)
# Main data receiving loop
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
|
09d05e9c5f3d52ab731520ae2ec9c700
|
{
"intermediate": 0.3935152292251587,
"beginner": 0.47378817200660706,
"expert": 0.13269661366939545
}
|
41,089
|
can i make 2 separate mp3 file into one file with node js?
|
4bef6c7ac88173a05e858b6b2e6ed9d7
|
{
"intermediate": 0.44794905185699463,
"beginner": 0.23266638815402985,
"expert": 0.31938451528549194
}
|
41,090
|
I have a list of stock returns in a format, like {AAPL:[2003:124, 2004:150,2005:180,2006:190,2007:210], AMZN:[2003:14,2004:20… etc
And I also got a list of market cap percentage of the total market cap of said companies {AAPL:[2003:4%, 2004:7%,2005:5%,2006:7% etc
Create a python script that creates an ETF proportionate to the market cap
|
18853e6292d4c4fe785a3927dbe1bb85
|
{
"intermediate": 0.3844069838523865,
"beginner": 0.33020633459091187,
"expert": 0.2853866219520569
}
|
41,091
|
another way to compute bleu4 import sacrebleu
from nltk.tokenize import word_tokenize
# Load reference answers from tokenized_test_dataset
reference_answers_list = [tokenized_test_dataset[i]['query'] for i in range(500)]
# Assuming df_generated_questions is already defined
# Extract generated questions from the DataFrame
generated_questions_list = df_generated_questions['generated_question'].tolist()
# Tokenize reference answers and generated questions using nltk
tokenized_reference_answers = [word_tokenize(reference) for reference in reference_answers_list]
tokenized_generated_questions = [word_tokenize(generated) for generated in generated_questions_list]
# Compute BLEU-4 score using sacrebleu
bleu_score = sacrebleu.corpus_bleu(tokenized_generated_questions, [tokenized_reference_answers])
# Print the BLEU-4 score
print(f'BLEU-4 Score: {bleu_score.score * 100:.2f}')
|
88eacde631994669d06a86fcb6c84090
|
{
"intermediate": 0.5062448978424072,
"beginner": 0.19551007449626923,
"expert": 0.29824507236480713
}
|
41,092
|
I making http server on Python. Please give me an example of "Hello world" response in HTTP protocol
|
347f847ad42751db7a6b03690ac55b40
|
{
"intermediate": 0.5137112736701965,
"beginner": 0.165639728307724,
"expert": 0.32064902782440186
}
|
41,093
|
What programming language do you think will be most useful considering the domination of ai over the future of software development
|
154acfc20f8ea46afcf83884b59f4353
|
{
"intermediate": 0.28364840149879456,
"beginner": 0.3784700036048889,
"expert": 0.33788153529167175
}
|
41,094
|
import socket
import threading
# Define host and port for the server
HOST = ‘127.0.0.1’
PORT = 65432
# The very basic “database” of username and password
VALID_USERNAME = “user”
VALID_PASSWORD = “pass”
# Helper function to check credentials
def check_credentials(username, password):
return username == VALID_USERNAME and password == VALID_PASSWORD
# Function to handle client connection
def handle_client(conn, addr):
print(f’Connected by {addr}‘)
try:
# Authentication
username = conn.recv(1024).decode()
password = conn.recv(1024).decode()
if not check_credentials(username, password):
conn.sendall(b"Authentication failed.“)
conn.close()
return
else:
conn.sendall(b"Authentication successful.”)
# Main data receiving loop
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
finally:
# Close the connection
conn.close()
# Create socket with IPv4 and TCP
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print(f"Server listening on {HOST}:{PORT}")
# Accept multiple connections in a loop
while True:
# Wait for a new connection
conn, addr = s.accept()
# Start a new thread for the client
client_thread = threading.Thread(target=handle_client, args=(conn, addr))
client_thread.start()
Traceback (most recent call last):
request, (addr, rport) = s.accept()
^^^^^^^^^^
File "C:\Program Files\Python312\Lib\socket.py", line 295, in accept
fd, addr = self._accept()
^^^^^^^^^^^^^^
OSError: [WinError 10038] Сделана попытка выполнить операцию на объекте, не являющемся сокетом
|
9710fb90652535294915454483eca716
|
{
"intermediate": 0.2853626608848572,
"beginner": 0.5921579599380493,
"expert": 0.1224793866276741
}
|
41,095
|
I am making a C++ SDL based game engine. I am going to be posting you my classes and my current project development stage and I want you to evaluate my currente state and evaluate if I need to add something else or if it's enough.
First I am gonna post my current project classes overview:
- BlendMode
- Camera
- Circle
- Color
- Font
- FPoint
- GameScreen
- InputManager
- Line
- Point
- Rect
- Renderer
- ResourceManager
- ScreenManager
- Surface
- Texture
- Transform
- Vertex
- Window
|
82bf39642407511f607a951ce4dce264
|
{
"intermediate": 0.16615045070648193,
"beginner": 0.5816498398780823,
"expert": 0.25219976902008057
}
|
41,096
|
import requests
import pandas as pd
from time import sleep
# مجموعة لتخزين الأحداث المطبوعة بالفعل لتجنب التكرار
printed_events = set()
def fetch_live_players(printed_events):
fixtures_url = 'https://fantasy.premierleague.com/api/fixtures/'
fixtures_response = requests.get(fixtures_url)
players_url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
players_response = requests.get(players_url)
if fixtures_response.status_code == 200 and players_response.status_code == 200:
fixtures_data = fixtures_response.json()
players_data = players_response.json()
live_fixtures = [fixture for fixture in fixtures_data if not fixture['finished'] and fixture['started']]
players_df = pd.DataFrame(players_data['elements'])
teams_df = pd.DataFrame(players_data['teams'])
if live_fixtures:
for fixture in live_fixtures:
event_id = fixture['event']
live_url = f'https://fantasy.premierleague.com/api/event/{event_id}/live/'
live_response = requests.get(live_url)
if live_response.status_code == 200:
live_data = live_response.json()['elements']
for element in live_data:
element_info = players_df.loc[players_df['id'] == element['id']].iloc[0]
team_info = teams_df.loc[teams_df['id'] == element_info['team']].iloc[0]
player_name = element_info['web_name']
stats = element['stats']
# تحقق من الأهداف
if stats['goals_scored'] > 0:
event_key = f"{player_name}-goal-{stats['goals_scored']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Goal⚽: {player_name}, {team_info['name']}- P: {stats['goals_scored']*6}, Tot: {stats['total_points']}")
# تحقق من الأسيست
if stats['assists'] > 0:
event_key = f"{player_name}-assist-{stats['assists']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Assist🅰️: {player_name}, {team_info['name']}- P: {stats['assists']*3}, Tot: {stats['total_points']}")
# تحقق من البطاقات الصفراء
if stats['yellow_cards'] > 0:
event_key = f"{player_name}-yellow-{stats['yellow_cards']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Yellow Card🟨: #{player_name}, #{team_info['name']}- P: -{stats['yellow_cards']*1}, Tot: {stats['total_points']}")
# تحقق من البطاقات الحمراء
if stats['red_cards'] > 0:
event_key = f"{player_name}-red-{stats['red_cards']}"
if event_key not in printed_events:
printed_events.add(event_key)
print(f"Red Card: {player_name}, {team_info['name']}, Event Points: -{stats['red_cards']*3}, Total Points: {stats['total_points']}")
else:
print('Failed to retrieve data.')
# نقطة بدء البرنامج
def main():
while True:
try:
fetch_live_players(printed_events)
except Exception as e:
print(e) # لدينا الآن الخطأ المطبوع في حال حدوث أي استثناء
sleep(60) # فترة الانتظار بين كل تحقق هي 60 ثانية
if __name__ == "__main__":
main() بمجرد طباعة حدث جديد في الشيفرة اريد ان يتم نشره مباشرة في فايس بوك .مع العلم عندي تطبيق فايسبوك على موقع المطور والتوكن وكل شئ
|
23c05d917486cf9af8ba65bac86b5534
|
{
"intermediate": 0.40353623032569885,
"beginner": 0.3928612172603607,
"expert": 0.20360258221626282
}
|
41,097
|
net.ipv4.tcp_moderate_rcvbuf?
|
aa5bdc5ec143ef5c2f05ca9e20d48cc2
|
{
"intermediate": 0.3461231589317322,
"beginner": 0.3217097818851471,
"expert": 0.33216702938079834
}
|
41,098
|
TWrite the explanationatory tweet and first tweet for PEATO [Pacific East Asian Treaty Organization], a version of ASEAN [PATO[ for all of East Asia in the form of a modernized version of the WW2-era Greater East Asian Co-Prosperity Sphere as "NATO but for East Asia and actually useful", with Japan, Taiwan and the members of the Manchukuo government in exile as main and starting members as a defensive front against the PRC in mainland China, the KMT remnants in Taiwan and modern-day Russia. The goal of PEATO is to form a collective defence for East Asia and revive and promote monarchy in China as an alternative to the ROC and PRC.
|
5bf3e60e37397083b26ac6610c33bd9b
|
{
"intermediate": 0.29801157116889954,
"beginner": 0.28023844957351685,
"expert": 0.42174994945526123
}
|
41,099
|
Given the following BL statements, draw the corresponding abstract syntax trees as defined by the mathematical model of StatementKernel. See Slides 5-12 in Abstract Syntax Trees for some examples.
IF next-is-empty THEN
move
ELSE
IF next-is-wall THEN
turnright
turnright
move
END IF
END IF
WHILE true DO
turnright
IF next-is-enemy THEN
TurnAround
ELSE
skip
END IF
turnleft
END WHILE
WHILE next-is-enemy DO
infect
TurnAround
move
turnright
END WHILE
IF next-is-friend THEN
turnright
turnright
WHILE true DO
infect
END WHILE
END IF
IF next-is-not-empty THEN
turnleft
turnleft
ELSE
WHILE next-is-empty DO
move
END WHILE
IF next-is-enemy THEN
infect
END IF
skip
END IF
Using recursion, complete the body of the following static method. Note the use of a Java switch statement. See the Statement slides (44-49) for the syntax, purpose, and behavior of this construct.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
/**
* Reports the number of calls to primitive instructions (move, turnleft,
* turnright, infect, skip) in a given {@code Statement}.
*
* @param s
* the {@code Statement}
* @return the number of calls to primitive instructions in {@code s}
* @ensures <pre>
* countOfPrimitiveCalls =
* [number of calls to primitive instructions in s]
* </pre>
*/
public static int countOfPrimitiveCalls(Statement s) {
int count = 0;
switch (s.kind()) {
case BLOCK: {
/*
* Add up the number of calls to primitive instructions
* in each nested statement in the BLOCK.
*/
// TODO - fill in case
break;
}
case IF: {
/*
* Find the number of calls to primitive instructions in
* the body of the IF.
*/
// TODO - fill in case
break;
}
case IF_ELSE: {
/*
* Add up the number of calls to primitive instructions in
* the "then" and "else" bodies of the IF_ELSE.
*/
// TODO - fill in case
break;
}
case WHILE: {
/*
* Find the number of calls to primitive instructions in
* the body of the WHILE.
*/
// TODO - fill in case
break;
}
case CALL: {
/*
* This is a leaf: the count can only be 1 or 0. Determine
* whether this is a call to a primitive instruction or not.
*/
// TODO - fill in case
break;
}
default: {
// this will never happen...can you explain why?
break;
}
}
return count;
}
|
14fd880a11cae8b838f1daf2388ec384
|
{
"intermediate": 0.3227166533470154,
"beginner": 0.3454081118106842,
"expert": 0.3318752646446228
}
|
41,100
|
write a program to solo mine bitcoin
|
c61b5d338a3b7cb83927f2d7a4889347
|
{
"intermediate": 0.22324545681476593,
"beginner": 0.14153291285037994,
"expert": 0.6352216601371765
}
|
41,101
|
TASK 1. Calculate the number of arithmetic operations that are required to solve a tridiagonal system
𝑇𝒙 = 𝒇 of size 𝑛 using the Thomas algorithm. Based on this result, determine the asymptotic
complexity of the Thomas algorithm, and compare it to the asymptotic complexity of the standard
Gaussian elimination.
TASK 2. Rewrite the Thomas algorithm in terms of the arrays 𝐴,𝐵, and 𝐶 introduced to store the matrix
𝑇 efficiently.
TASK 3. Implement the Thomas algorithm from TASK 2 as a Python function. The input parameters to
the function should be the coefficient matrix 𝑇 (stored as three arrays 𝐴,𝐵, and 𝐶) and the right-hand
side vector 𝒇. The output should be the solution vector 𝒙. The coefficient matrix and the right-hand
side should be defined in the main script and passed to the function that solves the system.
TASK 4. Test your code by solving the linear system of size 𝑛 = 10 with the values 𝐴 = 2, and 𝐵 = 𝐶 =
−1. Set the right-hand side to 𝒇 = 𝟏. To verify the correctness of your code, compare the solution
vector obtained from the Thomas algorithm to that obtained by applying the direct solver
numpy.linalg.solve(). For the latter, the coefficient matrix should be assembled.
TASK 5. Solve five linear systems 𝑇𝒙 = 𝒇 with 𝐴 = 2, 𝐵 = 𝐶 = −1 and 𝒇 = 𝟏 varying the problem size
𝑛 between 106
and 108
. Record the execution times in seconds for each case. To accomplish this task,
explore the Python function timer() from the package timeit (refer to the code for matrix
multiplication covered in lectures). Plot a graph where the obtained execution times are represented
as the function of the problem size 𝑛. What are your conclusions about the cost of the Thomas
algorithm?
|
a63b7242b9b2f93b8af8493a1e18ee20
|
{
"intermediate": 0.3241312503814697,
"beginner": 0.12740445137023926,
"expert": 0.548464298248291
}
|
41,102
|
I am writing a README.md and did some tests. I want to put a table in the readme, this is the data:
odp sd
gff3 4.304 0.02880972058
gff3.gz 6.272 0.1765502761
gxf2chrom sd
gff3 1.87652 0.01444115646
gff3.gz 2.05 0.007071067812
gtf 1.83356 0.008381407996
gtf.gz 1.93548 0.01314865012
"odp" and "gxf2chrom" are 2 different tools. "gxf2chrom" seems to be a lot faster and also provides additional format support. I want 1) write the table in the README and 2) also display a statistical test result to see if the differences are significant (which one shoul I use)?
|
5f8228bd92c053678313fe63c7066660
|
{
"intermediate": 0.4272286295890808,
"beginner": 0.22168557345867157,
"expert": 0.3510858118534088
}
|
41,103
|
#include <iostream>
#include <cmath>
#include <vector>
#include <algorithm>
// Recombination coefficients
double alphaB_HI(double T4) {
return 2.59e-13 * pow(T4, (-0.833 - 0.034 * log(T4))); // cm^3 s^-1
}
double alphaB_HeI(double T4) {
return 2.72e-13 * pow(T4, -0.789); // cm^3 s^-1
}
double alphaB_Halpha(double T4) {
return 1.17e-13 * pow(T4, (-0.942 - 0.031 * log(T4))); // cm^3 s^-1
}
double alphaB_Hbeta(double T4) {
return 3.03e-14 * pow(T4, (-0.874 - 0.058 * log(T4))); // cm^3 s^-1
}
double alpha1_HeI(double T4) {
return 1.54e-13 * pow(T4, -0.486); // cm^3 s^-1
}
double alphaA_HeI(double T4) {
return alphaB_HeI(T4) + alpha1_HeI(T4);
}
//#Osterbrock Table A5.1
//alphaB_OIII = 3.66*10**-12 #cm^3 s^-1
//alphaB_OII = 3.99*10**-13 #cm^3 s^-1
//RR: Badnel2006 https://iopscience.iop.org/article/10.1086/508465
//DR: Badnell,N.R.1986,J.Phys.B,19,3827. 2006a
//RR and DR summarized in Aaron Smith COLT bitbucket
//https://bitbucket.org/aaron_smith/colt/src/fb0cd32aeadaedce637a2df46780b1a71a1d3864/src/rates.h
//#########################Oxygen#########################################
double alpha_RR_OIII(double T4) {
double T = T4 * 10000;
double ST0 = sqrt(T / 0.1602);
double ST1 = sqrt(T / 4.377e6);
double Bp = 0.7668 + 0.107 * exp(-139200. / T);
return 2.096e-9 / (ST0 * pow((1 + ST0), (1 - Bp)) * pow((1 + ST1), (1 + Bp)));
}
double alpha_RR_OII(double T4) {
double T = T4 * 10000;
double ST0 = sqrt(T / 4.136);
double ST1 = sqrt(T / 4.214e6);
double Bp = 0.6109 + 0.4093 * exp(-87700. / T);
return 6.622e-11 / (ST0 * pow((1 + ST0), (1 - Bp)) * pow((1 + ST1), (1 + Bp)));
}
double alpha_DR_OIII(double T4) {
double T = T4 * 10000;
return pow(T, -1.5) * (1.627e-7 * exp(-45.35 / T) + 1.262e-7 * exp(-284.7 / T) + 6.663e-7 * exp(-4166. / T) + 3.925e-6 * exp(-28770. / T) + 0.002406 * exp(-195300. / T) + 0.001146 * exp(-364600. / T));
}
double alpha_DR_OII(double T4) {
double T = T4 * 10000;
return pow(T, -1.5) * (5.629e-8 * exp(-5395. / T) + 2.55e-7 * exp(-17700. / T) + 0.0006173 * exp(-167100. / T) + 0.0001627 * exp(-268700. / T));
}
double alphaB_OIII(double T4) {
return alpha_RR_OIII(T4) + alpha_DR_OIII(T4);
}
double alphaB_OII(double T4) {
return alpha_RR_OII(T4) + alpha_DR_OII(T4);
}
double delta_OII = 1.05e-9; // cm^3 s^-1
double delta_OI = 1.04e-9; // cm^3 s^-1
double k0_OI_ct(double T4) {
return 1.14e-9 * pow(T4, 0.4 + 0.018 * log(T4));
}
double k1_OI_ct(double T4) {
return 3.44e-10 * pow(T4, 0.451 + 0.036 * log(T4));
}
double k2_OI_ct(double T4) {
return 5.33e-10 * pow(T4, 0.384 + 0.024 * log(T4)) * exp(-97 / T4 / 10000);
}
double k0r_OI_ct(double T4) {
return 8.0 / 5.0 * k0_OI_ct(T4) * exp(-229 / T4 / 10000);
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for HI
std::vector<double> sigma_HI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 4.298e-1; // eV
const double sigma0 = 5.475e4; // Mb
const double ya = 3.288e1;
const double P = 2.963;
const double yw = 0.0;
const double y0 = 0.0;
const double y1 = 0.0;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 13.6 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants for H-alpha and H-beta frequencies
const double nu_Halpha = 1.89 * eV2J / h; // Hz
const double nu_Hbeta = 2.55 * eV2J / h; // Hz
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// States and constants for HeI
const int g0_HeI = 1;
const int g1_HeI = 1;
const int g2_HeI = 3;
const int g3_HeI = 3;
const double A30_HeI = 1.26e-4; // s^-1
// Interpolation functions for collisional coefficients
std::vector<double> T4_grid = {0.6000, 0.8000, 1.0000, 1.5000, 2.0000, 2.5000};
std::vector<double> k31_grid = {1.95e-8, 2.45e-8, 2.60e-8, 3.05e-8, 2.55e-8, 2.68e-8};
std::vector<double> k32_grid = {2.34e-9, 3.64e-9, 5.92e-9, 7.83e-9, 9.23e-9, 9.81e-9};
std::vector<double> T4_grid_E30 = {3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75};
std::vector<double> Omega03_grid = {6.198e-2, 6.458e-2, 6.387e-2, 6.157e-2, 5.832e-2, 5.320e-2, 4.787e-2, 4.018e-2, 3.167e-2};
double k31_HeI(double T4) {
auto it = std::upper_bound(T4_grid.begin(), T4_grid.end(), T4);
int idx = std::distance(T4_grid.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid.size()) idx = T4_grid.size() - 1;
double k31 = k31_grid[idx - 1] + (T4 - T4_grid[idx - 1]) / (T4_grid[idx] - T4_grid[idx - 1]) * (k31_grid[idx] - k31_grid[idx - 1]);
return k31;
}
double k32_HeI(double T4) {
auto it = std::upper_bound(T4_grid.begin(), T4_grid.end(), T4);
int idx = std::distance(T4_grid.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid.size()) idx = T4_grid.size() - 1;
double k32 = k32_grid[idx - 1] + (T4 - T4_grid[idx - 1]) / (T4_grid[idx] - T4_grid[idx - 1]) * (k32_grid[idx] - k32_grid[idx - 1]);
return k32;
}
double k30_HeI(double T4) {
auto it = std::upper_bound(T4_grid_E30.begin(), T4_grid_E30.end(), T4);
int idx = std::distance(T4_grid_E30.begin(), it);
if (idx == 0) idx = 1;
if (idx == T4_grid_E30.size()) idx = T4_grid_E30.size() - 1;
double Omega03 = Omega03_grid[idx - 1] + (T4 - T4_grid_E30[idx - 1]) / (T4_grid_E30[idx] - T4_grid_E30[idx - 1]) * (Omega03_grid[idx] - Omega03_grid[idx - 1]);
return 8.629e-8 / std::sqrt(T4) * Omega03 / g3_HeI;
}
// Fraction of recombination radiation resulting in hydrogen ionization
double p(double ne, double T4) {
double numerator = 0.75 * A30_HeI;
double denominator = A30_HeI + ne * (k30_HeI(T4) + k31_HeI(T4) + k32_HeI(T4));
double p_value = numerator / denominator + 0.25 * 2 / 3 + 0.75 * ne * k32_HeI(T4) / denominator;
p_value += (0.75 * ne * k31_HeI(T4) / denominator + 0.25 * 1 / 3) * 0.56;
return p_value;
}
// Photoionization cross section for HeI
std::vector<double> sigma_HeI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 13.61; // eV
const double sigma0 = 949.2; // Mb
const double ya = 1.469;
const double P = 3.188;
const double yw = 2.039;
const double y0 = 0.4434;
const double y1 = 2.136;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 24.59 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for HeII
std::vector<double> sigma_HeII(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 1.72; // eV
const double sigma0 = 1.369e4; // Mb
const double ya = 32.88;
const double P = 2.963;
const double yw = 0.0;
const double y0 = 0.0;
const double y1 = 0.0;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] / eV2J;
// Check energy range
if (E < 54.42 || E > 5e4) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double kb = 1.380649e-23; // Boltzmann constant (J/K)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Photoionization cross section for OI
std::vector<double> sigma_OI(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Constants for calculation
const double E0 = 1.240; // eV
const double sigma0 = 1.745e3; // Mb
const double ya = 3.784;
const double P = 17.64;
const double yw = 7.589e-2;
const double y0 = 8.698;
const double y1 = 1.271e-1;
// Loop through frequencies
for (size_t i = 0; i < nu.size(); ++i) {
// Convert frequency to energy
double E = h * nu[i] * 6.242e18;
// Check energy range
if (E < 13.62 || E > 538) {
sigma[i] = 0.0;
} else {
// Calculate sigma for valid energy range
double x = E / E0 - y0;
double y = std::sqrt(x * x + y1 * y1);
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y / ya), -P) * 1e-18; // cm^2
}
}
return sigma;
}
// Other functions and constants for OII section
// Define state degeneracy constants for OII
const int g0_OII = 4;
const int g1_OII = 6;
const int g2_OII = 4;
const int g3_OII = 4;
const int g4_OII = 2;
// Define spontaneous decay rate constants for OII
const double A10_OII = 7.416e-06 + 3.382e-05;
const double A20_OII = 1.414e-04 + 2.209e-05;
const double A21_OII = 1.30e-07 + 1.49e-20;
const double A30_OII = 5.22e-02 + 2.43e-07;
const double A31_OII = 8.37e-03 + 9.07e-02;
const double A32_OII = 1.49e-02 + 3.85e-02;
const double A40_OII = 2.12e-02 + 3.72e-07;
const double A41_OII = 8.34e-03 + 5.19e-02;
const double A42_OII = 9.32e-03 + 7.74e-02;
const double A43_OII = 1.41e-10 + 4.24e-24;
// Level energy constants for OII
const double E10_OII = 38575 * kb; // J
const double E20_OII = 38604 * kb;
const double E30_OII = 58225 * kb;
const double E40_OII = 58228 * kb;
const double E21_OII = E20_OII - E10_OII;
const double E31_OII = E30_OII - E10_OII;
const double E32_OII = E30_OII - E20_OII;
const double E41_OII = E40_OII - E10_OII;
const double E42_OII = E40_OII - E20_OII;
const double E43_OII = E40_OII - E30_OII;
// Level energy frequency constants for OII
const double nu10_OII = E10_OII / h; // Hz
const double nu20_OII = E20_OII / h;
const double nu21_OII = E21_OII / h;
const double nu30_OII = E30_OII / h;
const double nu31_OII = E31_OII / h;
const double nu32_OII = E32_OII / h;
const double nu40_OII = E40_OII / h;
const double nu41_OII = E41_OII / h;
const double nu42_OII = E42_OII / h;
const double nu43_OII = E43_OII / h;
// Constants
const double h = 6.62607015e-34; // Planck constant (J s)
const double kb = 1.380649e-23; // Boltzmann constant (J/K)
const double eV2J = 1.602176634e-19; // eV to Joules conversion factor
// Collisional (de-)excitation coefficients for OII
double Omega10_OII(double T4) {
return 0.803 * pow(T4, 0.023 - 0.008 * log(T4));
}
double k10_OII(double T4, int g1_OII) {
return 8.629e-8 / sqrt(T4) * Omega10_OII(T4) / g1_OII; // cm^3 s^-1
}
double k01_OII(double T4, int g1_OII, int g0_OII, double E10_OII) {
return g1_OII / g0_OII * k10_OII(T4, g1_OII) * exp(-E10_OII / (kb * T4 * 1e4));
}
double Omega20_OII(double T4) {
return 0.550 * pow(T4, 0.054 - 0.004 * log(T4));
}
double k20_OII(double T4, int g2_OII) {
return 8.629e-8 / sqrt(T4) * Omega20_OII(T4) / g2_OII; // cm^3 s^-1
}
double k02_OII(double T4, int g2_OII, int g0_OII, double E20_OII) {
return g2_OII / g0_OII * k20_OII(T4, g2_OII) * exp(-E20_OII / (kb * T4 * 1e4));
}
// Define Omega21_OII, k21_OII, k12_OII in a similar manner
double Omega30_OII(double T4) {
return 0.140 * pow(T4, 0.025 - 0.006 * log(T4));
}
double k30_OII(double T4, int g3_OII) {
return 8.629e-8 / sqrt(T4) * Omega30_OII(T4) / g3_OII; // cm^3 s^-1
}
double k03_OII(double T4, int g3_OII, int g0_OII, double E30_OII) {
return g3_OII / g0_OII * k30_OII(T4, g3_OII) * exp(-E30_OII / (kb * T4 * 1e4));
}
// Define Omega31_OII
double Omega31_OII(double T4) {
return 0.349 * pow(T4, 0.060 + 0.052 * log(T4));
}
// Define k31_OII
double k31_OII(double T4, int g3_OII) {
return 8.629e-8 / sqrt(T4) * Omega31_OII(T4) / g3_OII; // cm^3 s^-1
}
// Define k13_OII
double k13_OII(double T4, int g3_OII, int g1_OII, double E31_OII) {
return g3_OII / g1_OII * k31_OII(T4, g3_OII) * exp(-E31_OII / (kb * T4 * 1e4));
}
// Define Omega32_OII
double Omega32_OII(double T4) {
return 0.326 * pow(T4, 0.063 + 0.052 * log(T4));
}
// Define k32_OII
double k32_OII(double T4, int g3_OII, int g2_OII, double E32_OII) {
return 8.629e-8 / sqrt(T4) * Omega32_OII(T4) / g3_OII; // cm^3 s^-1
}
// Define k23_OII
double k23_OII(double T4, int g3_OII, int g2_OII, double E32_OII) {
return g3_OII / g2_OII * k32_OII(T4, g3_OII, g2_OII, E32_OII) * exp(-E32_OII / (kb * T4 * 1e4));
}
// Define Omega40_OII
double Omega40_OII(double T4) {
return 0.283 * pow(T4, 0.023 - 0.004 * log(T4));
}
// Define k40_OII
double k40_OII(double T4, int g4_OII) {
return 8.629e-8 / sqrt(T4) * Omega40_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k04_OII
double k04_OII(double T4, int g4_OII, int g0_OII, double E40_OII) {
return g4_OII / g0_OII * k40_OII(T4, g4_OII) * exp(-E40_OII / (kb * T4 * 1e4));
}
// Define Omega41_OII
double Omega41_OII(double T4) {
return 0.832 * pow(T4, 0.076 + 0.055 * log(T4));
}
// Define k41_OII
double k41_OII(double T4, int g4_OII, int g1_OII, double E41_OII) {
return 8.629e-8 / sqrt(T4) * Omega41_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k14_OII
double k14_OII(double T4, int g4_OII, int g1_OII, double E41_OII) {
return g4_OII / g1_OII * k41_OII(T4, g4_OII, g1_OII, E41_OII) * exp(-E41_OII / (kb * T4 * 1e4));
}
// Define Omega42_OII
double Omega42_OII(double T4) {
return 0.485 * pow(T4, 0.059 + 0.052 * log(T4));
}
// Define k42_OII
double k42_OII(double T4, int g4_OII, int g2_OII, double E42_OII) {
return 8.629e-8 / sqrt(T4) * Omega42_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k24_OII
double k24_OII(double T4, int g4_OII, int g2_OII, double E42_OII) {
return g4_OII / g2_OII * k42_OII(T4, g4_OII, g2_OII, E42_OII) * exp(-E42_OII / (kb * T4 * 1e4));
}
// Define Omega43_OII
double Omega43_OII(double T4) {
return 0.322 * pow(T4, 0.019 + 0.037 * log(T4));
}
// Define k43_OII
double k43_OII(double T4, int g4_OII, int g3_OII, double E43_OII) {
return 8.629e-8 / sqrt(T4) * Omega43_OII(T4) / g4_OII; // cm^3 s^-1
}
// Define k34_OII
double k34_OII(double T4, int g4_OII, int g3_OII, double E43_OII) {
return g4_OII / g3_OII * k43_OII(T4, g4_OII, g3_OII, E43_OII) * exp(-E43_OII / (kb * T4 * 1e4));
}
// Define R01_OII
double R01_OII(double ne, double T4) {
return ne * k01_OII(T4);
}
// Define R02_OII
double R02_OII(double ne, double T4) {
return ne * k02_OII(T4);
}
// Define R03_OII
double R03_OII(double ne, double T4) {
return ne * k03_OII(T4);
}
// Define R04_OII
double R04_OII(double ne, double T4) {
return ne * k04_OII(T4);
}
// Define R10_OII
double R10_OII(double ne, double T4) {
return ne * k10_OII(T4) + A10_OII;
}
// Define R12_OII
double R12_OII(double ne, double T4) {
return ne * k12_OII(T4);
}
// Define R13_OII
double R13_OII(double ne, double T4) {
return ne * k13_OII(T4);
}
// Define R14_OII
double R14_OII(double ne, double T4) {
return ne * k14_OII(T4);
}
// Define R20_OII
double R20_OII(double ne, double T4) {
return ne * k20_OII(T4) + A20_OII;
}
// Define R21_OII
double R21_OII(double ne, double T4) {
return ne * k21_OII(T4) + A21_OII;
}
// Define R23_OII
double R23_OII(double ne, double T4) {
return ne * k23_OII(T4);
}
// Define R24_OII
double R24_OII(double ne, double T4) {
return ne * k24_OII(T4);
}
// Define R30_OII
double R30_OII(double ne, double T4) {
return ne * k30_OII(T4) + A30_OII;
}
// Define R31_OII
double R31_OII(double ne, double T4) {
return ne * k31_OII(T4) + A31_OII;
}
// Define R32_OII
double R32_OII(double ne, double T4) {
return ne * k32_OII(T4) + A32_OII;
}
// Define R34_OII
double R34_OII(double ne, double T4) {
return ne * k34_OII(T4);
}
// Define R40_OII
double R40_OII(double ne, double T4) {
return ne * k40_OII(T4) + A40_OII;
}
// Define R41_OII
double R41_OII(double ne, double T4) {
return ne * k41_OII(T4) + A41_OII;
}
// Define R42_OII
double R42_OII(double ne, double T4) {
return ne * k42_OII(T4) + A42_OII;
}
// Define R43_OII
double R43_OII(double ne, double T4) {
return ne * k43_OII(T4) + A43_OII;
}
//Photoionization cross section
// Define constants
const double h = 6.626e-34; // Planck constant in J s
const double kb = 1.381e-23; // Boltzmann constant in J/K
// Define sigma_OII function
std::vector<double> sigma_OII(const std::vector<double>& nu) {
// Define cross section vector
std::vector<double> sigma(nu.size(), 0.0);
// Define energy range
const double E_min = 35.12 * h * 6.242e18; // eV
const double E_max = 558.1 * h * 6.242e18; // eV
// Iterate over frequency array
for (size_t i = 0; i < nu.size(); ++i) {
double E = h * nu[i] * 6.242e18; // eV
// Check if energy is within valid range
if (E >= E_min && E <= E_max) {
// Calculate cross section
double E0 = 1.386; // eV
double sigma0 = 5.967 * 10; // Mb
double ya = 3.175 * 10;
double P = 8.943;
double yw = 1.934e-2;
double y0 = 2.131 * 10;
double y1 = 1.503e-2;
double x = E / E0 - y0;
double y_val = std::sqrt(x * x + y1 * y1);
// Calculate cross section using given formula
sigma[i] = sigma0 * ((x - 1) * (x - 1) + yw * yw) * std::pow(y_val, 0.5 * P - 5.5) * std::pow(1 + std::sqrt(y_val / ya), -P) * 1e-18;
}
}
return sigma;
}
// OIII data
// State degeneracy
const int g0_OIII = 1;
const int g1_OIII = 3;
const int g2_OIII = 5;
const int g3_OIII = 5;
const int g4_OIII = 1;
// Spontaneous decay rate (s^-1)
const double A10_OIII = 2.6e-5;
const double A20_OIII = 3.5e-11;
const double A21_OIII = 9.8e-5;
const double A30_OIII = 1.9e-6;
const double A31_OIII = 0.0071;
const double A32_OIII = 0.021;
const double A40_OIII = 0;
const double A41_OIII = 0.23;
const double A42_OIII = 7.1e-4;
const double A43_OIII = 1.6;
// Level energy and frequency (in J and Hz respectively)
const double E10_OIII = 163 * kb;
const double E20_OIII = 441 * kb;
const double E30_OIII = 29169 * kb;
const double E40_OIII = 61207 * kb;
const double E21_OIII = E20_OIII - E10_OIII;
const double E31_OIII = E30_OIII - E10_OIII;
const double E32_OIII = E30_OIII - E20_OIII;
const double E41_OIII = E40_OIII - E10_OIII;
const double E42_OIII = E40_OIII - E20_OIII;
const double E43_OIII = E40_OIII - E30_OIII;
const double nu10_OIII = E10_OIII / h;
const double nu20_OIII = E20_OIII / h;
const double nu30_OIII = E30_OIII / h;
const double nu40_OIII = E40_OIII / h;
const double nu21_OIII = E21_OIII / h;
const double nu31_OIII = E31_OIII / h;
const double nu32_OIII = E32_OIII / h;
const double nu41_OIII = E41_OIII / h;
const double nu42_OIII = E42_OIII / h;
const double nu43_OIII = E43_OIII / h;
// OIII collisional (de-)excitation coefficients
// Omega and k functions
auto Omega10_OIII = [](double T4) { return 0.522 * pow(T4, (0.033 - 0.009 * log(T4))); };
auto k10_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega10_OIII(T4) / g1_OIII; };
auto k01_OIII = [](double T4) { return g1_OIII / g0_OIII * k10_OIII(T4) * exp(-E10_OIII / (kb * T4) / 10000); };
auto Omega20_OIII = [](double T4) { return 0.257 * pow(T4, (0.081 + 0.017 * log(T4))); };
auto k20_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega20_OIII(T4) / g2_OIII; };
auto k02_OIII = [](double T4) { return g2_OIII / g0_OIII * k20_OIII(T4) * exp(-E20_OIII / (kb * T4) / 10000); };
auto Omega21_OIII = [](double T4) { return 1.23 * pow(T4, (0.053 + 0.007 * log(T4))); };
auto k21_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega21_OIII(T4) / g2_OIII; };
auto k12_OIII = [](double T4) { return g2_OIII / g1_OIII * k21_OIII(T4) * exp(-E21_OIII / (kb * T4) / 10000); };
auto Omega30_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))); };
auto k30_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega30_OIII(T4) / g3_OIII; };
auto k03_OIII = [](double T4) { return g3_OIII / g0_OIII * k30_OIII(T4) * exp(-E30_OIII / (kb * T4) / 10000); };
auto Omega31_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))) * 3; };
auto k31_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega31_OIII(T4) / g3_OIII; };
auto k13_OIII = [](double T4) { return g3_OIII / g1_OIII * k31_OIII(T4) * exp(-E31_OIII / (kb * T4) / 10000); };
auto Omega32_OIII = [](double T4) { return 0.243 * pow(T4, (0.12 + 0.031 * log(T4))) * 5; };
auto k32_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega32_OIII(T4) / g3_OIII; };
auto k23_OIII = [](double T4) { return g3_OIII / g2_OIII * k32_OIII(T4) * exp(-E32_OIII / (kb * T4) / 10000); };
auto Omega40_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))); };
auto k40_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega40_OIII(T4) / g4_OIII; };
auto k04_OIII = [](double T4) { return g4_OIII / g0_OIII * k40_OIII(T4) * exp(-E40_OIII / (kb * T4) / 10000); };
auto Omega41_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))) * 3; };
auto k41_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega41_OIII(T4) / g4_OIII; };
auto k14_OIII = [](double T4) { return g4_OIII / g1_OIII * k41_OIII(T4) * exp(-E41_OIII / (kb * T4) / 10000); };
auto Omega42_OIII = [](double T4) { return 0.0321 * pow(T4, (0.118 + 0.057 * log(T4))) * 5; };
auto k42_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega42_OIII(T4) / g4_OIII; };
auto k24_OIII = [](double T4) { return g4_OIII / g2_OIII * k42_OIII(T4) * exp(-E42_OIII / (kb * T4) / 10000); };
auto Omega43_OIII = [](double T4) { return 0.523 * pow(T4, (0.210 - 0.099 * log(T4))); };
auto k43_OIII = [](double T4) { return 8.629e-8 / sqrt(T4) * Omega43_OIII(T4) / g4_OIII; };
auto k34_OIII = [](double T4) { return g4_OIII / g3_OIII * k43_OIII(T4) * exp(-E43_OIII / (kb * T4) / 10000); };
// Five level rates for OIII
auto R01_OIII = [&](double ne, double T4) { return ne * k01_OIII(T4); };
auto R02_OIII = [&](double ne, double T4) { return ne * k02_OIII(T4); };
auto R03_OIII = [&](double ne, double T4) { return ne * k03_OIII(T4); };
auto R04_OIII = [&](double ne, double T4) { return ne * k04_OIII(T4); };
auto R10_OIII = [&](double ne, double T4) { return ne * k10_OIII(T4) + A10_OIII; };
auto R12_OIII = [&](double ne, double T4) { return ne * k12_OIII(T4); };
auto R13_OIII = [&](double ne, double T4) { return ne * k13_OIII(T4); };
auto R14_OIII = [&](double ne, double T4) { return ne * k14_OIII(T4); };
auto R20_OIII = [&](double ne, double T4) { return ne * k20_OIII(T4) + A20_OIII; };
auto R21_OIII = [&](double ne, double T4) { return ne * k21_OIII(T4) + A21_OIII; };
auto R23_OIII = [&](double ne, double T4) { return ne * k23_OIII(T4); };
auto R24_OIII = [&](double ne, double T4) { return ne * k24_OIII(T4); };
auto R30_OIII = [&](double ne, double T4) { return ne * k30_OIII(T4) + A30_OIII; };
auto R31_OIII = [&](double ne, double T4) { return ne * k31_OIII(T4) + A31_OIII; };
auto R32_OIII = [&](double ne, double T4) { return ne * k32_OIII(T4) + A32_OIII; };
auto R34_OIII = [&](double ne, double T4) { return ne * k34_OIII(T4); };
auto R40_OIII = [&](double ne, double T4) { return ne * k40_OIII(T4) + A40_OIII; };
auto R41_OIII = [&](double ne, double T4) { return ne * k41_OIII(T4) + A41_OIII; };
auto R42_OIII = [&](double ne, double T4) { return ne * k42_OIII(T4) + A42_OIII; };
auto R43_OIII = [&](double ne, double T4) { return ne * k43_OIII(T4) + A43_OIII; };
// NII parameters
// State degeneracy
int g0_NII = 1;
int g1_NII = 3;
int g2_NII = 5;
int g3_NII = 5;
int g4_NII = 1;
// Spontaneous decay rates (s^-1)
double A10_NII = 2.08e-6;
double A20_NII = 1.12e-12;
double A21_NII = 7.46e-6;
double A30_NII = 5.25e-7;
double A31_NII = 9.22e-7 + 9.84e-4;
double A32_NII = 8.65e-6 + 2.91e-3;
double A40_NII = 0;
double A41_NII = 3.18e-2;
double A42_NII = 1.55e-4;
double A43_NII = 1.14;
// Level energy and frequency (J and s^-1)
double E10_NII = 70 * kb;
double E20_NII = 188 * kb;
double E30_NII = 22037 * kb;
double E40_NII = 47033 * kb;
double E21_NII = E20_NII - E10_NII;
double E31_NII = E30_NII - E10_NII;
double E32_NII = E30_NII - E20_NII;
double E41_NII = E40_NII - E10_NII;
double E42_NII = E40_NII - E20_NII;
double E43_NII = E40_NII - E30_NII;
double nu10_NII = E10_NII / h;
double nu20_NII = E20_NII / h;
double nu30_NII = E30_NII / h;
double nu40_NII = E40_NII / h;
double nu21_NII = E21_NII / h;
double nu31_NII = E31_NII / h;
double nu32_NII = E32_NII / h;
double nu41_NII = E41_NII / h;
double nu42_NII = E42_NII / h;
double nu43_NII = E43_NII / h;
// Collisional (de-)excitation coefficients for NII
// Omega functions
// Effective collision strengths Omega(T4) for the five-level NII ion.
// Every fit has the shape  Omega = a * T4^(b + c*ln T4)  with T4 the electron
// temperature in units of 1e4 K; the exponent is named before calling pow so
// each line reads "amplitude * T4^exponent".
double Omega10_NII(double T4) { const double ex = 0.099 + 0.014 * log(T4); return 0.431 * pow(T4, ex); }
double Omega20_NII(double T4) { const double ex = 0.166 + 0.030 * log(T4); return 0.273 * pow(T4, ex); }
double Omega21_NII(double T4) { const double ex = 0.137 + 0.024 * log(T4); return 1.15 * pow(T4, ex); }
double Omega30_NII(double T4) { const double ex = 0.053 + 0.009 * log(T4); return 0.303 * pow(T4, ex); }
double Omega31_NII(double T4) { const double ex = 0.053 + 0.010 * log(T4); return 0.909 * pow(T4, ex); }
double Omega32_NII(double T4) { const double ex = 0.054 + 0.011 * log(T4); return 1.51 * pow(T4, ex); }
double Omega40_NII(double T4) { const double ex = 0.066 + 0.018 * log(T4); return 0.0352 * pow(T4, ex); }
double Omega41_NII(double T4) { const double ex = 0.070 + 0.021 * log(T4); return 0.105 * pow(T4, ex); }
double Omega42_NII(double T4) { const double ex = 0.065 + 0.017 * log(T4); return 0.176 * pow(T4, ex); }
double Omega43_NII(double T4) { const double ex = -0.175 - 0.014 * log(T4); return 0.806 * pow(T4, ex); }
// Rate coefficients
// Collisional rate coefficients for NII (cm^3 s^-1).
// Downward (de-excitation):  k_ul = 8.629e-8 / sqrt(T4) * Omega_ul / g_u.
// Upward (excitation) from detailed balance:
//   k_lu = (g_u / g_l) * k_ul * exp(-E_ul / (kb * T)),  with T = 1e4 * T4.
// BUG FIX: the statistical weights g*_NII are ints, so ratios like
// g4_NII / g3_NII (= 1/5) truncated to 0 and g3_NII / g1_NII (= 5/3) to 1,
// zeroing or distorting several excitation rates.  The numerator is now cast
// to double before dividing.
double k10_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega10_NII(T4) / g1_NII; }
double k01_NII(double T4) { return static_cast<double>(g1_NII) / g0_NII * k10_NII(T4) * exp(-E10_NII / (kb * T4) / 10000); }
double k20_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega20_NII(T4) / g2_NII; }
double k02_NII(double T4) { return static_cast<double>(g2_NII) / g0_NII * k20_NII(T4) * exp(-E20_NII / (kb * T4) / 10000); }
double k21_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega21_NII(T4) / g2_NII; }
double k12_NII(double T4) { return static_cast<double>(g2_NII) / g1_NII * k21_NII(T4) * exp(-E21_NII / (kb * T4) / 10000); }
double k30_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega30_NII(T4) / g3_NII; }
double k03_NII(double T4) { return static_cast<double>(g3_NII) / g0_NII * k30_NII(T4) * exp(-E30_NII / (kb * T4) / 10000); }
double k31_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega31_NII(T4) / g3_NII; }
double k13_NII(double T4) { return static_cast<double>(g3_NII) / g1_NII * k31_NII(T4) * exp(-E31_NII / (kb * T4) / 10000); }
double k32_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega32_NII(T4) / g3_NII; }
double k23_NII(double T4) { return static_cast<double>(g3_NII) / g2_NII * k32_NII(T4) * exp(-E32_NII / (kb * T4) / 10000); }
double k40_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega40_NII(T4) / g4_NII; }
double k04_NII(double T4) { return static_cast<double>(g4_NII) / g0_NII * k40_NII(T4) * exp(-E40_NII / (kb * T4) / 10000); }
double k41_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega41_NII(T4) / g4_NII; }
double k14_NII(double T4) { return static_cast<double>(g4_NII) / g1_NII * k41_NII(T4) * exp(-E41_NII / (kb * T4) / 10000); }
double k42_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega42_NII(T4) / g4_NII; }
double k24_NII(double T4) { return static_cast<double>(g4_NII) / g2_NII * k42_NII(T4) * exp(-E42_NII / (kb * T4) / 10000); }
double k43_NII(double T4) { return 8.629e-8 / sqrt(T4) * Omega43_NII(T4) / g4_NII; }
double k34_NII(double T4) { return static_cast<double>(g4_NII) / g3_NII * k43_NII(T4) * exp(-E43_NII / (kb * T4) / 10000); }
// Five-level rates for NII
// Total transition rates (s^-1 per ion) between the five NII levels.
// Upward rates are purely collisional (ne * k_lu); downward rates add the
// spontaneous radiative decay A_ul on top of collisional de-excitation.
// ne is the electron number density (cm^-3), T4 the temperature in 1e4 K.
double R01_NII(double ne, double T4) { return ne * k01_NII(T4); }
double R02_NII(double ne, double T4) { return ne * k02_NII(T4); }
double R03_NII(double ne, double T4) { return ne * k03_NII(T4); }
double R04_NII(double ne, double T4) { return ne * k04_NII(T4); }
double R10_NII(double ne, double T4) { return ne * k10_NII(T4) + A10_NII; }
double R12_NII(double ne, double T4) { return ne * k12_NII(T4); }
double R13_NII(double ne, double T4) { return ne * k13_NII(T4); }
double R14_NII(double ne, double T4) { return ne * k14_NII(T4); }
double R20_NII(double ne, double T4) { return ne * k20_NII(T4) + A20_NII; }
double R21_NII(double ne, double T4) { return ne * k21_NII(T4) + A21_NII; }
double R23_NII(double ne, double T4) { return ne * k23_NII(T4); }
double R24_NII(double ne, double T4) { return ne * k24_NII(T4); }
double R30_NII(double ne, double T4) { return ne * k30_NII(T4) + A30_NII; }
double R31_NII(double ne, double T4) { return ne * k31_NII(T4) + A31_NII; }
double R32_NII(double ne, double T4) { return ne * k32_NII(T4) + A32_NII; }
double R34_NII(double ne, double T4) { return ne * k34_NII(T4); }
double R40_NII(double ne, double T4) { return ne * k40_NII(T4) + A40_NII; }
double R41_NII(double ne, double T4) { return ne * k41_NII(T4) + A41_NII; }
double R42_NII(double ne, double T4) { return ne * k42_NII(T4) + A42_NII; }
double R43_NII(double ne, double T4) { return ne * k43_NII(T4) + A43_NII; }
// Cross section for NI
// Photoionization cross section of neutral nitrogen (NI) in cm^2 for each
// input frequency nu (s^-1).  The analytic form matches the Verner et al.
// (1996) fit shape -- confirm the source of the coefficients.  Entries whose
// photon energy lies outside [14.53, 404.8] eV are left at zero.
std::vector<double> sigma_NI(const std::vector<double>& nu) {
    // Fit parameters (E0 in eV, sigma0 in Mb; 1 Mb = 1e-18 cm^2).
    const double E0 = 4.034;
    const double sigma0 = 8.235 * 100;
    const double ya = 8.033 * 10;
    const double P = 3.928;
    const double yw = 9.097 * pow(10, -2);
    const double y0 = 8.598 * pow(10, -1);
    const double y1 = 2.325;
    std::vector<double> sigma(nu.size(), 0.0);
    for (size_t i = 0; i < nu.size(); ++i) {
        // Photon energy in eV (h*nu in J times 6.242e18 eV/J).
        const double E = h * nu[i] * 6.242 * pow(10, 18);
        if (!(E >= 14.53 && E <= 404.8))
            continue; // outside the fit's validity range -> leave zero
        const double x = E / E0 - y0;
        const double y = sqrt(pow(x, 2) + pow(y1, 2));
        sigma[i] = sigma0 * (pow(x - 1, 2) + pow(yw, 2)) * pow(y, 0.5 * P - 5.5) * pow(1 + sqrt(y / ya), -P) * pow(10, -18); // cm^2
    }
    return sigma;
}
// Cross section for NII
// Photoionization cross section of singly-ionized nitrogen (NII) in cm^2 for
// each input frequency nu (s^-1).  Same analytic fit shape as sigma_NI
// (Verner et al. 1996 form -- confirm coefficient source); zero outside the
// [29.6, 423.6] eV validity window.
std::vector<double> sigma_NII(const std::vector<double>& nu) {
    // Fit parameters (E0 in eV, sigma0 in Mb; 1 Mb = 1e-18 cm^2).
    const double E0_NII = 6.128 * pow(10, -2);
    const double sigma0_NII = 1.944;
    const double ya_NII = 8.163 * pow(10, 2);
    const double P_NII = 8.773;
    const double yw_NII = 1.043 * pow(10, 1);
    const double y0_NII = 4.280 * pow(10, 2);
    const double y1_NII = 2.030 * pow(10, 1);
    std::vector<double> sigma(nu.size(), 0.0);
    for (size_t i = 0; i < nu.size(); ++i) {
        // Photon energy in eV (h*nu in J times 6.242e18 eV/J).
        const double E_NII = h * nu[i] * 6.242 * pow(10, 18);
        if (!(E_NII >= 29.6 && E_NII <= 423.6))
            continue; // outside the fit's validity range -> leave zero
        const double x_NII = E_NII / E0_NII - y0_NII;
        const double y_NII = sqrt(pow(x_NII, 2) + pow(y1_NII, 2));
        sigma[i] = sigma0_NII * (pow(x_NII - 1, 2) + pow(yw_NII, 2)) * pow(y_NII, 0.5 * P_NII - 5.5) * pow(1 + sqrt(y_NII / ya_NII), -P_NII) * pow(10, -18); // cm^2
    }
    return sigma;
}
// Cross section for NIII
// Photoionization cross section of doubly-ionized nitrogen (NIII) in cm^2 for
// each input frequency nu (s^-1).  Same analytic fit shape as sigma_NI/NII
// (appears to follow the Verner et al. 1996 form -- confirm source).
// Entries with photon energy outside [47.45, 447.3] eV stay zero.
std::vector<double> sigma_NIII(const std::vector<double>& nu) {
    std::vector<double> sigma(nu.size(), 0.0);
    // Fit parameters (E0 in eV, sigma0 in Mb; 1 Mb = 1e-18 cm^2).
    const double E0_NIII = 0.2420; // eV
    const double sigma0_NIII = 0.9375; // Mb
    const double ya_NIII = 278.8;
    const double P_NIII = 9.156;
    const double yw_NIII = 1.850;
    const double y0_NIII = 187.7;
    const double y1_NIII = 3.999;
    for (size_t i = 0; i < nu.size(); ++i) {
        // Photon energy converted from J to eV (1 J = 6.242e18 eV).
        double E_NIII = h * nu[i] * 6.242 * pow(10, 18);
        if (E_NIII >= 47.45 && E_NIII <= 447.3) {
            double x_NIII = E_NIII / E0_NIII - y0_NIII;
            double y_NIII = sqrt(pow(x_NIII, 2) + pow(y1_NIII, 2));
            sigma[i] = sigma0_NIII * (pow(x_NIII - 1, 2) + pow(yw_NIII, 2)) * pow(y_NIII, 0.5 * P_NIII - 5.5) * pow(1 + sqrt(y_NIII / ya_NIII), -P_NIII) * pow(10, -18); // cm^2
        }
    }
    return sigma;
}
Create a main function for this script.
|
f962467e17398d86caecfa817b423fad
|
{
"intermediate": 0.20861786603927612,
"beginner": 0.4287492036819458,
"expert": 0.36263296008110046
}
|
41,104
|
import time
import asyncio
from decimal import Decimal, getcontext
import ast
import operator as op
from tkinter import ttk, Tk, StringVar, W, EW
import tkinter.messagebox as messagebox
# Safe evaluation setup
# Whitelist mapping AST operator node types to their arithmetic functions.
# Anything not listed here is rejected by SafeEval, which is what makes
# eval-free expression evaluation safe.
allowed_operators = {
    ast.Add: op.add,        # +
    ast.Sub: op.sub,        # -
    ast.Mult: op.mul,       # *
    ast.Div: op.truediv,    # /
    ast.Pow: op.pow,        # **
    ast.BitXor: op.xor,     # ^  NOTE: bitwise xor, not exponentiation
    ast.USub: op.neg        # unary minus
}
class SafeEval:
    """Safely evaluate arithmetic expressions via the ``ast`` module.

    Only numeric literals and a whitelisted set of operators are accepted,
    so arbitrary code (names, calls, attribute access) cannot run.

    Fixes over the original:
    * uses ``ast.Constant`` (``ast.Num`` is deprecated since Python 3.8);
    * float literals go through ``Decimal(str(...))`` so ``0.1 + 0.2``
      yields exactly ``Decimal('0.3')`` instead of the binary-float noise;
    * unsupported operators raise ``TypeError`` instead of leaking
      ``KeyError``.
    """

    # Operator whitelist kept on the class so an instance is self-contained.
    # NOTE: ``^`` (BitXor) is *bitwise* xor, not exponentiation; ``**`` is Pow.
    _OPS = {
        ast.Add: op.add,
        ast.Sub: op.sub,
        ast.Mult: op.mul,
        ast.Div: op.truediv,
        ast.Pow: op.pow,
        ast.BitXor: op.xor,
        ast.USub: op.neg,
    }

    def __call__(self, expr):
        """Parse *expr* and return its value as a :class:`decimal.Decimal`."""
        return self._eval(ast.parse(expr, mode='eval').body)

    def _op(self, node_op):
        # Look up a whitelisted operator; reject anything else explicitly.
        try:
            return self._OPS[type(node_op)]
        except KeyError:
            raise TypeError(f"unsupported operator: {type(node_op).__name__}") from None

    def _eval(self, node):
        if isinstance(node, ast.Constant):  # <number>
            if isinstance(node.value, (int, float)):
                # str() round-trip keeps the literal's decimal digits exact.
                return Decimal(str(node.value))
            raise TypeError(f"unsupported constant: {node.value!r}")
        elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
            return self._op(node.op)(self._eval(node.left), self._eval(node.right))
        elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
            return self._op(node.op)(self._eval(node.operand))
        raise TypeError(node)
class LargeNumberCalculatorApp:
    """Minimal tkinter calculator front-end.

    Bug fixed: ``calculate`` was an ``async def`` wired directly as the
    Button ``command``; clicking only created a coroutine object that was
    never awaited (there is no running event loop), so nothing ever ran.
    The evaluation now runs on a daemon worker thread and the result is
    marshalled back onto the Tk main loop with ``root.after``, keeping the
    UI responsive for very large numbers.
    """

    def __init__(self, root):
        self.root = root
        self.safe_eval = SafeEval()
        self.result_var = StringVar()
        self.expression_var = StringVar()
        self._build_gui()

    def _build_gui(self):
        # Same layout as before: entry, button, result label in a row.
        ttk.Entry(self.root, textvariable=self.expression_var).pack(side='left')
        ttk.Button(self.root, text="Calculate", command=self.calculate).pack(side='left')
        ttk.Label(self.root, textvariable=self.result_var).pack(side='left')

    def calculate(self):
        """Button callback: evaluate the expression off the UI thread."""
        import threading  # local import keeps this fix self-contained

        expression = self.expression_var.get()
        start = time.perf_counter()

        def worker():
            try:
                result = self.safe_eval(expression)
            except Exception as exc:  # surface parse/eval errors to the user
                result = f"Error: {exc}"
            duration = time.perf_counter() - start
            # Tk widgets are not thread-safe: hand the update to the main loop.
            self.root.after(
                0,
                self.result_var.set,
                f"Result: {result} (Computation Time: {duration:.2f} sec)",
            )

        threading.Thread(target=worker, daemon=True).start()
def main():
    """Entry point: build the root window, attach the app, run the loop."""
    window = Tk()
    # Keep a reference so the app (and its bound callbacks) stays alive.
    app = LargeNumberCalculatorApp(window)
    window.mainloop()


if __name__ == "__main__":
    main()
Could you rewrite this from scratch? I'd like a code renew. Turn it into a capable Calculator, which supports calculations of enormous numbers, such as a 1 followed by a hundred thousand zeroes. Make it not lag even if that amount of digits are in the screen.
|
9926c3184dfbd5594ae9671f3f7ef3fb
|
{
"intermediate": 0.3490576446056366,
"beginner": 0.3367691934108734,
"expert": 0.3141731917858124
}
|
41,105
|
import time
import threading
from decimal import Decimal, getcontext
import ast
import operator as op
from tkinter import ttk, Tk, StringVar
# Safe evaluation setup
# Whitelist mapping AST operator node types to their arithmetic functions;
# SafeEval rejects any operator not listed here.
allowed_operators = {
    ast.Add: op.add,        # +
    ast.Sub: op.sub,        # -
    ast.Mult: op.mul,       # *
    ast.Div: op.truediv,    # /
    ast.Pow: op.pow,        # **
    ast.USub: op.neg        # unary minus
}
def is_number(s):
    """Return ``True`` when ``s`` parses as a float, else ``False``."""
    try:
        float(s)
    except ValueError:
        return False
    return True
class SafeEval:
    """Evaluate an arithmetic expression safely and return the result as a string.

    Parse failures and evaluation errors are reported by returning the
    exception message as a string (the GUI displays whatever comes back),
    preserving the original contract.

    Fixes over the original:
    * uses ``ast.Constant`` (``ast.Num`` is deprecated since Python 3.8);
    * float literals go through ``Decimal(str(...))`` so decimal literals
      keep their exact decimal value;
    * the operator table lives on the class, so the evaluator is
      self-contained.
    """

    # Whitelisted operators; anything else raises TypeError below.
    _OPS = {
        ast.Add: op.add,
        ast.Sub: op.sub,
        ast.Mult: op.mul,
        ast.Div: op.truediv,
        ast.Pow: op.pow,
        ast.USub: op.neg,
    }

    def __call__(self, expr):
        try:
            return str(self._eval(ast.parse(expr, mode='eval').body))
        except Exception as e:
            # Report the problem as displayable text rather than crashing the UI.
            return str(e)

    def _op(self, node_op):
        try:
            return self._OPS[type(node_op)]
        except KeyError:
            raise TypeError("Unsupported operator: {}".format(type(node_op).__name__)) from None

    def _eval(self, node):
        if isinstance(node, ast.Constant):
            if isinstance(node.value, (int, float)):
                # str() round-trip preserves the literal's decimal digits.
                return Decimal(str(node.value))
            raise TypeError("Unsupported constant: {!r}".format(node.value))
        elif isinstance(node, ast.BinOp):
            return self._op(node.op)(self._eval(node.left), self._eval(node.right))
        elif isinstance(node, ast.UnaryOp):
            return self._op(node.op)(self._eval(node.operand))
        else:
            raise TypeError("Unsupported type: {}".format(node))
class LargeNumberCalculatorApp:
    """Tk front-end for arbitrary-precision Decimal arithmetic.

    Bug fixed: the worker thread previously wrote to the Tk ``StringVar``s
    directly.  Tkinter is not thread-safe; every UI update is now marshalled
    onto the main loop with ``master.after``.  The worker thread is also a
    daemon so it cannot block interpreter shutdown.
    """

    def __init__(self, master):
        self.master = master
        self.master.title("Large Number Calculator")
        self.safe_eval = SafeEval()
        self.expression_var = StringVar(master)   # user-typed expression
        self.result_var = StringVar(master)       # (possibly elided) result text
        self.status_var = StringVar(master)       # progress/timing line
        self.setup_widgets()

    def setup_widgets(self):
        """Lay out entry, button, result label and status label top-to-bottom."""
        ttk.Entry(self.master, textvariable=self.expression_var, font=('Arial', 12)).pack(fill='x', padx=10, pady=10)
        ttk.Button(self.master, text="Calculate", command=self.on_calculate_clicked).pack()
        ttk.Label(self.master, textvariable=self.result_var, font=('Arial', 12), wraplength=600, justify='left').pack(fill='x', padx=10, pady=10)
        ttk.Label(self.master, textvariable=self.status_var, font=('Arial', 12)).pack(fill='x')

    def on_calculate_clicked(self):
        """Run the evaluation on a worker thread so the UI never freezes."""
        expr = self.expression_var.get()
        threading.Thread(target=self.async_calculate, args=(expr,), daemon=True).start()

    def async_calculate(self, expr):
        """Worker-thread body: evaluate *expr* and report result plus timing."""
        start = time.time()
        self.update_status("Calculating…")
        result = self.safe_eval(expr)
        duration = time.time() - start
        self.update_status(f"Calculation completed in {duration:.2f} seconds")
        self.update_result(result)

    def update_result(self, result):
        # Huge results are elided to the first/last 15 digits so the label
        # stays cheap to render even for million-digit numbers.
        if is_number(result) and len(result) > 30:
            display_result = f"{result[:15]}…{result[-15:]} (total digits: {len(result)})"
        else:
            display_result = result
        # Tk is single-threaded: schedule the variable write on the main loop.
        self.master.after(0, self.result_var.set, display_result)

    def update_status(self, status):
        # Same thread-safety rule as update_result.
        self.master.after(0, self.status_var.set, status)
def main():
    """Program entry point: set up huge-number precision, then run the GUI."""
    getcontext().prec = 1000000  # Decimal context: up to one million significant digits
    window = Tk()
    window.geometry("700x300")
    app = LargeNumberCalculatorApp(window)  # kept alive for the duration of mainloop
    window.mainloop()


if __name__ == "__main__":
    main()
Rewrite, please. GUI revamp, code revamp. I know you can make it GOOD.
|
e5df8c0a6f88e02f92b27c1527801f11
|
{
"intermediate": 0.3168882429599762,
"beginner": 0.4397072494029999,
"expert": 0.24340449273586273
}
|
41,106
|
Define the relationship between the concept of testing and interval estimation.
|
0fbe2c2bbf593d5e816b9b0f46055900
|
{
"intermediate": 0.2504245936870575,
"beginner": 0.18118545413017273,
"expert": 0.5683900117874146
}
|
41,107
|
import cv2
import numpy as np
import torch
from transformers import YolosImageProcessor, YolosForObjectDetection
from PIL import Image

# Load the model (uses the GPU when available).
# NOTE(review): the original used the bare id 'yolos-tiny'; on the Hugging
# Face hub this checkpoint lives under 'hustvl/yolos-tiny' — confirm whether
# a local directory or the hub id is intended.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = YolosForObjectDetection.from_pretrained('yolos-tiny').to(device)
image_processor = YolosImageProcessor.from_pretrained("yolos-tiny")

BATCH_SIZE = 4  # frames processed per forward pass

# Open the default camera.
cap = cv2.VideoCapture(0)

# Process the stream batch by batch.
while True:
    # Collect a batch of frames, keeping the raw BGR frames for drawing.
    # (The original read-and-discarded one frame per outer iteration and
    # stored preprocessed tensors where it later expected PIL images.)
    frames = []
    pixel_values = []
    for _ in range(BATCH_SIZE):
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV frames are BGR; the model expects RGB.
        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        inputs = image_processor(images=image, return_tensors="pt")
        frames.append(frame)
        pixel_values.append(inputs["pixel_values"])
    if not frames:
        break  # stream ended

    batch = torch.cat(pixel_values, dim=0).to(device)

    # Object detection.  YolosForObjectDetection's forward takes
    # `pixel_values`; the original passed a nonexistent `images=` kwarg.
    with torch.no_grad():
        outputs = model(pixel_values=batch)

    # target_sizes must be the (height, width) of each ORIGINAL frame so the
    # boxes come back in frame coordinates.
    target_sizes = torch.tensor([f.shape[:2] for f in frames])
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=target_sizes)

    # Draw the bounding boxes on the original frames.
    quit_requested = False
    for frame, result in zip(frames, results):
        for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
            x0, y0, x1, y1 = [int(round(v)) for v in box.tolist()]
            cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 255, 0), 2)
        cv2.imshow("Video", frame)
        # Press 'q' to quit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            quit_requested = True
            break
    if quit_requested:
        break

cap.release()
cv2.destroyAllWindows()
|
61c1f17d502055b6bbf01296544da1e7
|
{
"intermediate": 0.359034925699234,
"beginner": 0.3491823077201843,
"expert": 0.2917827367782593
}
|
41,108
|
I'm making a spelling bee game with ClojureScript. Here is my code.
(ns spelling-bee.core
(:require
[clojure.string :as str]
[re-frame.core :as rf]
[reagent.core :as ra]
[reagent.dom :as rdom]
[stylefy.core :as stylefy :refer [use-style]]
[stylefy.reagent :as stylefy-reagent]
[spelling-bee.events :as events]
[spelling-bee.words :as words])
(:require-macros
[reagent.core :refer [with-let]]))
(def debug?
^boolean goog.DEBUG)
(set! *warn-on-infer* false)
;---------- stylefy components ----------
; px vs rem, google
(defn letter-style [letter-validation-sequence]
(case letter-validation-sequence
:required {:color "#4CAF50"}
:valid {:color "#000000"}
:invalid {:color "#AAAAAA" :opacity "0.5"}))
;---------- main page elements ----------
(defn spawn-words-button
"Starts the game with a preset set of words."
[]
(let [game-started (rf/subscribe [::events/game-started])]
(when-not @game-started
[:button
{:on-click #(rf/dispatch [::events/set-words-and-letters words/word-collection])
:class "button-style"}
"Get Letters!"])))
(defn submit-button
  "Button that submits the current input as a word guess.
  Fix: removed a leftover debug `println` from the click handler."
  [word]
  (let [input-value (rf/subscribe [::events/current-input])]
    [:button
     {:on-click #(when (seq word)
                   (rf/dispatch [::events/submit-word @input-value]))
      :class "button-style"}
     "Submit"]))
(defn text-input
"Field for the user to input a word of their choosing."
[]
(let [input-value (rf/subscribe [::events/current-input])]
[:input {:type "text"
:placeholder "Type here!"
:value @input-value
:on-change #(rf/dispatch [::events/set-current-input (-> % .-target .-value)])
:class "input-style"}]))
(defn shuffle-order-button!
"Shuffles the order of the letters displayed."
[display-letters]
[:button {:on-click #(rf/dispatch [::events/shuffle-letter-order display-letters])
:class "button-style"}
"Shuffle letters"])
;---------- main page renderer ----------
(defn main-panel []
#_{:clj-kondo/ignore [:unresolved-symbol]}
(with-let [name (rf/subscribe [::events/name])
game-started (rf/subscribe [::events/game-started])
words (rf/subscribe [::events/words])
found-words (rf/subscribe [::events/found-words])
common-letter (rf/subscribe [::events/common-letter])
letters (rf/subscribe [::events/letters])
display-letters (rf/subscribe [::events/display-letters])
current-input (rf/subscribe [::events/current-input])
message (rf/subscribe [::events/message])
score (rf/subscribe [::events/score])
database (rf/subscribe [::events/dbdb])]
[:html
[:head
[:title "Spelling Bee!"]
[:style {:id "_stylefy-server-styles_"} "_stylefy-server-styles-content_"]
[:style {:id "_stylefy-constant-styles_"}]
[:style {:id "_stylefy-styles_"}]]
[:body {:class "body-background"}
[:div
[:div {:class "main-style"}
[:h1
"Hello, " @name]
;[:p "debug: "@database]
[:h3 @message]
[spawn-words-button]
(when @game-started
[:div {:class "main-container-style"}
[:div {:class "main-panel-style"}
[:div (use-style {:text-align "center"})
[text-input]
[submit-button @current-input]]
[:p "Common Letter: " (str (first @common-letter))]
[:p "Other Letters: " (str/join ", " @display-letters)]
[:div (use-style {:text-align "center"})
[shuffle-order-button! @display-letters]]
[:h3 "Your score: " @score]]
[:div {:class "side-panel-style"}
[:h3
"Found words:"]
[:ul (for [word (sort @found-words)] ; sort found words into an alphabetical list
[:li word])]]])
]]]]))
;---------- page load parameters ----------
(defn dev-setup []
(when debug?
(println "dev mode")))
(defn ^:dev/after-load mount-root []
(rf/clear-subscription-cache!)
(let [root-el (.getElementById js/document "app")]
(rdom/unmount-component-at-node root-el)
(rdom/render [main-panel] root-el)))
(defn install-global-key-listeners []
(.addEventListener js/window "keydown" events/global-key-handler))
(defn init []
(install-global-key-listeners) ; listen for keypress events
(rf/dispatch-sync [::events/initialize-db]) ; get re-frame atom initialized
(stylefy/init {:dom (stylefy-reagent/init)}) ; set up css
(dev-setup)
(mount-root))
(ns spelling-bee.events
(:require
[clojure.set :as set]
[clojure.string :as str]
[re-frame.core :as rf]))
;---------- our app state atom ----------
;; Initial application state; all game data lives in this single re-frame
;; app-db map.
(def default-db
  {:name "player"                          ; display name shown in the greeting
   :game-started false                     ; toggles the main game panel
   :words #{}                              ; the valid word list for this round
   :common-letter #{}                      ; set holding the mandatory letter(s)
   :letters #{}                            ; every letter usable this round
   :display-letters []                     ; shuffled letters minus the common one
   :found-words #{}                        ; words the player has already guessed
   :current-input ""                       ; text currently being typed
   :message "Welcome to the Spelling Bee!" ; status/feedback line
   :score 0})                              ; running points total
;---------- handlers ----------
;; Translates raw keydown DOM events into re-frame dispatches.
;; NOTE(review): rf/subscribe is called here outside of a reactive component;
;; re-frame warns that such subscriptions are never disposed. The later
;; revision of this namespace moves key handling into an event handler
;; instead — confirm this version is superseded.
(defn global-key-handler [e]
  (let [key (.-key e)
        input-value (rf/subscribe [::current-input])]
    (cond
      ;; plain letters are appended to the current input
      (re-matches #"[a-zA-Z]" key)
      (rf/dispatch [::append-current-input (str key)])

      ;; Enter submits whatever has been typed so far
      (= key "Enter")
      (rf/dispatch [::submit-word @input-value])

      ;; Backspace drops the last character
      (= key "Backspace")
      (let [subtract-letter #(subs % 0 (dec (count %)))]
        (rf/dispatch [::set-current-input (subtract-letter @input-value)]))

      :else
      nil)))
; remove subscribe, do in functions
;---------- various functions ----------
;; Later this can be substituted with a database call to pull a list of words.
(defn get-unique-letter-collection
  "Return the set of characters used across every word in `word-set`."
  [word-set]
  (set (seq (str/join (vec word-set)))))
(defn find-common-letter
  "Intersect the character sets of every word, yielding the letter(s)
  shared by all words in `word-set`."
  [word-set]
  (->> word-set
       seq
       (map set)
       (reduce set/intersection)))
(defn validate-word
  "Checks the given word against the current word list and letter set to see if it is valid. Gives the following keywords as a result.
   :submit-ok :too-short :invalid :no-common :not-in-list :other

  NOTE: the clauses are order-sensitive — membership in `word-list` wins
  before any other check, and `:not-in-list` is only reached once the word
  is known to use valid letters and to contain the common letter."
  [word word-list letters common-letter]
  (cond
    (contains? word-list word) :submit-ok ; first check if the word is in the word collection
    (> 4 (count (seq word))) :too-short ; reject words shorter than 4 letters
    (not (every? letters (set word))) :invalid ; check if every letter in the word is in letters set
    (not (contains? (set word) (first common-letter))) :no-common ; if it does not contain the common letter
    (contains? (set word) (first common-letter)) :not-in-list ; then check if the word at least contains common letter
    :else :other)) ; generic if it somehow manages to not match one of the above
(defn validate-letter
  "Classify a typed letter: :required (the common letter), :valid (in the
  round's letter set), otherwise :invalid."
  [letter letters common-letter]
  (let [required (str (first common-letter))]
    (cond
      (= letter required) :required
      (contains? (set letters) letter) :valid
      :else :invalid)))
(defn calculate-points
  "Score `word`: a pangram (uses every letter) earns its length plus 7,
  a four-letter word earns 1, anything else earns its length."
  [word letters]
  (let [word-length (count (seq word))]
    (cond
      (= (get-unique-letter-collection word) (set letters)) (+ word-length 7)
      (= word-length 4) 1
      :else word-length)))
;; (map #(validate-letter #{}) (seq "arroyo"))
;---------- subscriptions to data from app state ----------
(rf/reg-sub ::name
(fn [db]
(:name db)))
(rf/reg-sub ::game-started
(fn [db]
(:game-started db)))
(rf/reg-sub ::words
(fn [db]
(:words db)))
(rf/reg-sub ::found-words
(fn [db]
(:found-words db)))
(rf/reg-sub ::common-letter
(fn [db]
(:common-letter db)))
(rf/reg-sub ::letters
(fn [db]
(:letters db)))
(rf/reg-sub ::display-letters
(fn [db]
(:display-letters db)))
(rf/reg-sub ::current-input
(fn [db]
(:current-input db)))
(rf/reg-sub ::message
(fn [db]
(:message db)))
(rf/reg-sub ::score
(fn [db]
(:score db)))
(rf/reg-sub ::dbdb
(fn [db]
db))
;---------- events ----------
(rf/reg-event-db ::initialize-db
(fn [_ _]
default-db))
(rf/reg-event-db ::set-words-and-letters
(fn [db [_ word-set]]
(let [common-letter (find-common-letter word-set)
letter-coll (get-unique-letter-collection word-set)]
(assoc db :words word-set
:common-letter common-letter
:letters letter-coll
:display-letters (shuffle (vec (remove common-letter letter-coll)))
:game-started true))))
(rf/reg-event-db ::set-current-input
(fn [db [_ input-value]]
(assoc db :current-input input-value)))
(rf/reg-event-db ::append-current-input
(fn [db [_ input-value]]
(update db :current-input str input-value)))
(rf/reg-event-db ::shuffle-letter-order
(fn [db [_ display-letters]]
(assoc db :display-letters (shuffle display-letters))))
(rf/reg-event-db ::submit-word
(fn [db [_ word]]
(let [letters (:letters db)
common-letter (:common-letter db)
words (:words db)
point-val (calculate-points word letters)]
(case (validate-word word words letters common-letter)
:submit-ok (if (contains? (:found-words db) word)
(assoc db :message "You've already found that word!")
(-> db
(update :found-words conj word)
(update :score + point-val)
(assoc :message (str "Great job! You found " word ", worth a score of " point-val "!")))) ; add the valid word to found words
:too-short (assoc db :message "Only words with 4 letters or more count.")
:not-in-list (assoc db :message (str "Sorry, " word " isn't in the word list today."))
:no-common (assoc db :message "Nice try, but the word needs to contain the common letter.")
:invalid (assoc db :message "All letters in the word must be from the given letter set.")
:other (assoc db :message "Try again.")))))
; use reg-event-fx to dispatch further event to clear input
Can you help me clean it up?
|
c9bccd832e454e5b4ffadda31e06fbc3
|
{
"intermediate": 0.4805363416671753,
"beginner": 0.3770674169063568,
"expert": 0.1423962116241455
}
|
41,109
|
Here is my current code.
(ns spelling-bee.core
(:require
[re-frame.core :as rf]
[reagent.core :as ra :refer [with-let]]
[reagent.dom :as rdom]
[stylefy.core :as stylefy :refer [use-style]]
[stylefy.reagent :as stylefy-reagent]
[spelling-bee.events :as events]
[spelling-bee.elements :as elements :refer [db-get]]))
(def debug?
^boolean goog.DEBUG)
(set! *warn-on-infer* false)
;---------- main page renderer ----------
(defn main-panel
  "Root view: greeting, transient message, and (once started) the game
  panel plus the found-words side panel.

  Fixes:
  * default-db stores the shake flags under :shake-message / :shake-angry
    (no trailing ?), so the old (db-get :shake-message?) lookups always
    returned nil and the animations never fired;
  * the old (str \"\" (when ...) (when ...)) class produced the broken
    class \"-angry\" when only the angry flag was set — now one class is
    chosen explicitly;
  * found-words <li> items are keyed to silence React's list warning."
  []
  #_{:clj-kondo/ignore [:unresolved-symbol]}
  (with-let [database (rf/subscribe [::events/dbdb])
             name (db-get :name)
             game-started (db-get :game-started)
             words (db-get :words)
             found-words (db-get :found-words)
             common-letter (db-get :common-letter)
             letters (db-get :letters)
             display-letters (db-get :display-letters)
             current-input (db-get :current-input)
             message (db-get :message)
             score (db-get :score)
             shake-message? (db-get :shake-message)
             shake-angry? (db-get :shake-angry)]
    [:div
     [:div {:class "main-style"}
      [:h1
       "Hello, " @name]
      ;[:p "debug: "@database]
      [:h3 {:class (cond @shake-angry? "shake-angry"
                         @shake-message? "shake"
                         :else "")}
       @message]
      [elements/message-component]
      [elements/spawn-words-button]
      (when @game-started
        [:div {:class "main-container-style"}
         [:div {:class "main-panel-style"}
          [:div (use-style {:text-align "center"})
           [elements/styled-text-input]
           [elements/submit-button @current-input]]
          [:div {:class "letter-buttons-container"}
           [elements/letter-buttons-panel @display-letters (first @common-letter)]]
          [:div (use-style {:text-align "center"})
           [elements/shuffle-order-button @display-letters]
           [elements/backspace-button]]
          [:h3 "Your score: " @score]]
         [:div {:class "side-panel-style"}
          [:h3
           "Found words:"]
          [:ul (for [word (sort @found-words)] ; alphabetical list
                 ^{:key word} [:li word])]]])]]))
;---------- page load parameters ----------
(defn dev-setup []
(when debug?
(println "dev mode")))
(defn ^:dev/after-load mount-root []
(rf/clear-subscription-cache!)
(let [root-el (.getElementById js/document "app")]
(rdom/unmount-component-at-node root-el)
(rdom/render [main-panel] root-el)))
(defn install-global-key-listeners []
(.addEventListener js/window "keydown" (fn [e] (rf/dispatch [::events/key-press (.-key e)]))))
#_{:clj-kondo/ignore [:clojure-lsp/unused-public-var]}
(defn init []
(install-global-key-listeners) ; listen for keypress events
(rf/dispatch-sync [::events/initialize-db]) ; get re-frame atom initialized
(stylefy/init {:dom (stylefy-reagent/init)}) ; set up css
(dev-setup)
(mount-root))
/* some unsorted css style */
/* Card that wraps the whole game UI. */
.main-style {
    padding: 20px;
    width: 100%;
    max-width: 600px;
    margin: 0 auto;
    /* FIX: the font name was wrapped in typographic quotes (‘…’), which is
       invalid CSS and silently dropped the whole declaration. */
    font-family: 'Open Sans', sans-serif;
    border: 3px solid #ccc;
    background-color: #F0FFFF;
}
.button-style {
background-color: #4CAF50;
border: none;
color: white;
border-color: #4CAF50;
padding: 15px 32px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 22px;
margin-top: 8px;
margin-bottom: 8px;
cursor: pointer;
clip-path: polygon(50% 0%, 100% 50%, 50% 100%, 0% 50%);
transition: transform 0.2s ease-in-out;
}
.button-style:hover {
transform: scale(1.5);
}
.garbage-can-button {
background: none;
border: none;
cursor: pointer;
font-size: 30px;
color:#ffc400;
padding: 10px;
transition: transform 0.2s ease-in-out;
}
.garbage-can-button:hover {
transform: scale(1.5);
}
.input-style {
color: transparent;
font-size: 18px;
padding: 10px;
border: 4px solid #cfc;
border-radius: 3px;
margin-bottom: 5px;
font-size: 22px;
font-family: inherit;
position: relative;
justify-content: left;
pointer-events: none;
}
.input-container {
position: relative;
}
.styled-letters {
position: absolute;
font-size: 22px;
margin-top: 14px;
margin-left: 14px;
top: 0;
left: 0;
text-align: center;
pointer-events: none;
}
.side-panel-style {
flex: 1;
padding: 10px;
margin-left: 20px;
border: 3px solid #ccc;
border-radius: 4px;
background-color: #FFFFFF;
max-width: 200px;
}
.main-panel-style {
max-width: 280px;
}
.main-container-style {
display: flex;
justify-content: space-around;
}
.body-background {
background-image: url('images/tokimemobgscroll.gif');
background-repeat: repeat;
}
/* animation */
@keyframes shake {
0% { transform: translate(0px, 0px); }
25% { transform: translate(-5px, 0px); }
50% { transform: translate(5px, 0px); }
75% { transform: translate(-5px, 0px); }
100% { transform: translate(0px, 0px); }
}
@keyframes angry-shake {
0%, 100% { transform: translateX(0); }
10%, 30%, 50%, 70%, 90% { transform: translateX(-10px); }
20%, 40%, 60%, 80% { transform: translateX(10px); }
}
.shake {
animation: shake 0.5s;
}
.shake-angry {
animation: angry-shake 0.5s;
}
@keyframes slide-in {
from {
transform: translateX(100%);
opacity: 0;
}
to {
transform: translateX(0);
opacity: 1;
}
}
@keyframes fade-out {
from {
transform: translateX(0);
opacity: 1;
}
to {
transform: translateX(0);
opacity: 0;
}
}
.message-animation {
    /* Slide in, hold the message for 2.5 seconds, then fade out.
       FIX: the original comment was missing its closing asterisk, so the
       malformed comment swallowed the will-change declaration below. */
    animation: slide-in 0.5s ease-out, fade-out 1s ease-in 2.5s forwards;
    will-change: transform, opacity; /* optimization for smoother animations */
}
/* cool button styles */
.hex-button {
width: 60px;
height: 70px;
background-color: #ffc400;
border: 0px;
text-align: center;
font-size: 24px;
margin: 0px;
cursor: pointer;
clip-path: polygon(50% 0%, 100% 25%, 100% 75%, 50% 100%, 0% 75%, 0% 25%);
transition: transform 0.2s ease-in-out;
}
.hex-button:hover {
transform: scale(1.2);
}
.letter-buttons-panel {
text-align: center;
padding-top: 10px;
padding-bottom: 10px;
}
.hex-row {
display: flex;
justify-content: center;
}
.hex-button {
margin: 4px;
}
.top-row,
.bottom-row {
margin-top: -16px;
margin-bottom: -16px;
}
(ns spelling-bee.elements
(:require
[clojure.string :as str]
[re-frame.core :as rf]
[stylefy.core :as stylefy :refer [use-style]]
[spelling-bee.events :as events]
[spelling-bee.logic :as logic]
[spelling-bee.words :as words]))
;---------- shorthand ----------
(defn db-get [key]
(rf/subscribe [::events/get key]))
;---------- stylefy components ----------
; px vs rem, google
(defn letter-style [letter-validation-sequence]
(case letter-validation-sequence
:required {:color "#4CAF50"}
:valid {:color "#000000"}
:invalid {:color "#AAAAAA" :opacity "0.5"}))
;---------- button elements ----------
(defn spawn-words-button
"Starts the game with a preset set of words."
[]
(let [game-started (db-get :game-started)]
(when-not @game-started
[:button
{:on-click #(rf/dispatch [::events/set-words-and-letters words/word-collection])
:class "button-style"}
"Get Letters!"])))
(defn submit-button
[word]
(let [input-value (db-get :current-input)]
[:button
{:on-click #(when (seq word)
(println "click!")
(rf/dispatch [::events/submit-word @input-value]))
:class "button-style"}
"Submit"]))
(defn shuffle-order-button
"Shuffles the order of the letters displayed."
[display-letters]
[:button {:on-click #(rf/dispatch [::events/shuffle-letter-order display-letters])
:class "button-style"}
"Shuffle letters"])
(defn backspace-button []
[:button {:class "garbage-can-button"
:on-click #(rf/dispatch [::events/delete-last-letter])}
[:i {:class "fa fa-trash"}]])
(defn hex-button
([letter & [is-common]]
[:button.hex-button {:on-click #(rf/dispatch [::events/append-current-input letter])
:style (when is-common {:font-weight "bold" :color "#4CAF50"})}
letter]))
;---------- input elements ----------
(defn styled-letter [letter valid-letters common-letter]
(let [letter-validation (logic/validate-letter letter valid-letters common-letter)]
[:span (use-style (letter-style letter-validation)) letter]))
(defn styled-text-input
  "Transparent text input overlaid with per-letter colored spans.

  Fixes:
  * :on-change previously dispatched the *subscription* id ::events/get
    (with :set-current-input as payload), which is not a registered event —
    it now dispatches ::events/set-current-input directly;
  * the letter spans are keyed by index to silence React's list warning."
  []
  (let [input-value (db-get :current-input)
        valid-letters (db-get :letters)
        common-letter (db-get :common-letter)]
    [:div.input-container
     [:input.input-style
      {:type "text"
       :placeholder (when (str/blank? @input-value) "Type here!")
       :value @input-value
       :on-change #(rf/dispatch [::events/set-current-input (-> % .-target .-value)])}]
     [:div.styled-letters
      (map-indexed
       (fn [idx ltr]
         ^{:key idx} [styled-letter ltr @valid-letters @common-letter])
       @input-value)]]))
(defn letter-buttons-panel
  "Honeycomb of clickable letter buttons: two on the top row, three in the
  middle (the mandatory common letter centered), two on the bottom."
  [valid-letters common-letter]
  (let [top-row (take 2 valid-letters)
        middle-row (concat (take 1 (drop 2 valid-letters)) [common-letter] (take 1 (drop 3 valid-letters)))
        bottom-row (take 2 (drop 4 valid-letters))]
    [:div.letter-buttons-panel
     [:div.hex-row.top-row (map hex-button top-row)]
     ;; if index is 1 (the middle one), it passes letter and a truthy value into the argument so it can be detected.
     [:div.hex-row.middle-row (doall (map-indexed (fn [idx letter] (hex-button letter (and (= idx 1) (= letter common-letter)))) middle-row))]
     [:div.hex-row.bottom-row (map hex-button bottom-row)]]))
;---------- pop-in etc elements ----------
(defn message-component
  "Transient message overlay, rendered only while :message-show is set.
  Fix: the old :class attribute duplicated the .message-animation class
  already supplied by the hiccup keyword (and re-tested the flag the `when`
  had just checked)."
  []
  (let [message (db-get :message)
        showing-message (db-get :message-show)]
    (when @showing-message
      [:div.message-animation @message])))
(ns spelling-bee.events
(:require
[re-frame.core :as rf]
[spelling-bee.logic :as logic]))
;---------- our app state atom ----------
(def default-db
{:name "player"
:game-started false
:words #{}
:common-letter #{}
:letters #{}
:display-letters []
:found-words #{}
:current-input ""
:message "Welcome to the Spelling Bee!"
:message-show false
:score 0
:shake-message false
:shake-angry false})
;---------- handlers ----------
(defn global-key-handler [key db]
(cond
(re-matches #"[a-zA-Z]" key) [::append-current-input (str key)]
(= key "Enter") [::submit-word (:current-input db)]
(= key "Backspace") [::delete-last-letter]
:else nil))
;---------- subscriptions to data from app state ----------
(rf/reg-sub ::get
(fn [db [_ key]]
(get db key)))
(rf/reg-sub ::dbdb
(fn [db]
db))
;---------- events ----------
(rf/reg-event-db ::initialize-db
(fn [_ _]
default-db))
(rf/reg-event-db ::set-words-and-letters
(fn [db [_ word-set]]
(let [common-letter (logic/find-common-letter word-set)
letter-coll (logic/get-unique-letter-collection word-set)]
(assoc db :words word-set
:common-letter common-letter
:letters letter-coll
:display-letters (shuffle (vec (remove common-letter letter-coll)))
:game-started true))))
(rf/reg-event-fx ::key-press
(fn [{:keys [db]} [_ key]]
(let [result (global-key-handler key db)]
(when result
{:dispatch result}))))
(rf/reg-event-db ::set-current-input
(fn [db [_ input-value]]
(assoc db :current-input input-value)))
(rf/reg-event-fx ::set-message
(fn [db [_ message]]
(do
(js/setTimeout #(rf/dispatch [::reset-message]) 4000)
(assoc db :message message :showing-message true))))
(rf/reg-event-db ::reset-message
(fn [db _]
(assoc db :shake-message false :shake-angry false)))
(rf/reg-event-db ::append-current-input
(fn [db [_ input-value]]
(if (> 20 (count (seq (:current-input db))))
(update db :current-input str input-value)
(assoc db :message "Less letters please!"))))
(rf/reg-event-db ::delete-last-letter
(fn [db _]
(update db :current-input #(subs % 0 (max 0 (dec (count %)))))))
(rf/reg-event-db ::shuffle-letter-order
(fn [db [_ display-letters]]
(assoc db :display-letters (shuffle display-letters))))
(rf/reg-event-db ::reset-shake-message
(fn [db _]
(assoc db :shake-message false :shake-angry false)))
(rf/reg-event-db ::submit-word
(fn [db [_ word]]
(let [letters (:letters db)
common-letter (:common-letter db)
words (:words db)
point-val (logic/calculate-points word letters)
submit (partial assoc db :current-input "" :message)]
(js/setTimeout #(rf/dispatch [::reset-shake-message]) 500) ; preemptively set a timeout to kill any shaking
(case (logic/validate-word word words letters common-letter)
:submit-ok (if (contains? (:found-words db) word)
(submit "You've already found that word!")
(-> db
(update :found-words conj word)
(update :score + point-val)
(assoc :current-input "" :message (str "Great job! You found " word ", worth a score of " point-val "!")))) ; add the valid word to found words
:too-short (submit "Only words with 4 letters or more count.")
:not-in-list (submit (str "Sorry, " word " isn't in the word list today."))
:no-common (submit "Nice try, but the word needs to contain the common letter." :shake-message true)
:invalid (submit "All letters in the word must be from the given letter set." :shake-message true :shake-angry true)
:other (submit "Try again.")))))
; use reg-event-fx to dispatch further event to clear input
(ns spelling-bee.logic
(:require
[clojure.set :as set]
[clojure.string :as str]))
;---------- various functions ----------
(defn get-unique-letter-collection [word-set]
(-> word-set
vec
str/join
seq
set))
(defn find-common-letter [word-set]
(reduce
set/intersection
(map set (seq word-set))))
(defn validate-word
"Checks the given word against the current word list and letter set to see if it is valid. Gives the following keywords as a result.
:submit-ok :too-short :invalid :no-common :not-in-list :other"
[word word-list letters common-letter]
(cond
(contains? word-list word) :submit-ok ; first check if the word is in the word collection
(> 4 (count (seq word))) :too-short ; check length, notify if less than 3 letters
(not (every? letters (set word))) :invalid ; check if every letter in the word is in letters set
(not (contains? (set word) (first common-letter))) :no-common ; if it does not contain the common letter
(contains? (set word) (first common-letter)) :not-in-list ; then check if the word at least contains common letter
:else :other)) ; generic if it somehow manages to not match one of the above
(defn validate-letter [letter letters common-letter]
(cond
(= letter (str (first common-letter))) :required
(contains? (set letters) letter) :valid
:else :invalid))
(defn calculate-points [word letters]
(cond
(= (get-unique-letter-collection word) (set letters)) (+ (count (seq word)) 7)
(= (count (seq word)) 4) (int 1)
:else (count (seq word))))
Currently, the pop-up message component doesn't appear.
|
1b1437e57fd30ae00774e508bd47fc3b
|
{
"intermediate": 0.28315743803977966,
"beginner": 0.6590136289596558,
"expert": 0.05782891809940338
}
|
41,110
|
HI!
|
b3bf9797f33d7a40a2ea2bcaa4340a26
|
{
"intermediate": 0.3374777138233185,
"beginner": 0.2601830065250397,
"expert": 0.40233927965164185
}
|
41,111
|
#include <stdio.h>
void findLongestSubsequence(int a[], int n) {
int lengths[n];
int prevIndices[n];
int maxLength = 1;
int endIndex = 0;
// Initialize lengths and prevIndices arrays
for (int i = 0; i < n; i++) {
lengths[i] = 1;
prevIndices[i] = -1;
}
// Find lengths of increasing subsequences
for (int i = 1; i < n; i++) {
for (int j = 0; j < i; j++) {
if (a[i] > a[j] && lengths[j] + 1 > lengths[i]) {
lengths[i] = lengths[j] + 1;
prevIndices[i] = j;
if (lengths[i] > maxLength) {
maxLength = lengths[i];
endIndex = i;
}
}
}
}
// Print the length of the longest increasing subsequence
printf("%d\n", maxLength);
// Print the indices of the elements in the longest increasing subsequence
int indices[maxLength];
int current = endIndex;
while (current >= 0) {
indices[--maxLength] = current;
current = prevIndices[current];
}
for (int i = 0; i < maxLength; i++) {
printf("%d ", indices[i]);
}
printf("\n");
}
int main() {
int n;
scanf("%d", &n);
int a[n];
for (int i = 0; i < n; i++) {
scanf("%d", &a[i]);
}
findLongestSubsequence(a, n);
return 0;
}
for input-
9
1 5 6 7 2 8 3 4 10
output should be -
4
0 4 6 7
|
b240f1bb785857f6b140aa019ad0b65e
|
{
"intermediate": 0.32432815432548523,
"beginner": 0.3999786376953125,
"expert": 0.27569326758384705
}
|
41,112
|
I would like to create Adobe Bridge start up script which adds a command under tools menu and when clicked copies selected files into to desired folder. Our desired folder is : \\10.202.12.20\import\smbimp1
|
62beef4286d2267cff9a1e4d5c356937
|
{
"intermediate": 0.39071890711784363,
"beginner": 0.2710149884223938,
"expert": 0.3382661044597626
}
|
41,113
|
#include <stdio.h>
// struct node
// {
// int data;
// int index;
// struct node *next;
// };
int function(int arr[], int n)
{
int freq[n];
int max = 1;
for (int i = 0; i < n; i++) {
freq[i] = 1;
for (int j = 0; j < i; j++) {
if (arr[j] + 1 == arr[i] && freq[j] + 1 > freq[i]) {
freq[i] = freq[j] + 1;
if (freq[i] > max)
{
max = freq[i];
}
}
}
}
return max;
}
int main() {
int n;
scanf("%d", &n);
int arr[n];
int freq[n];
// struct node *head = NULL;
// struct node *temp = head;
for (int i = 0; i < n; i++)
{
int data;
scanf("%d", &arr[i]);
//freq[(a[i]%n)]=i;
//printf("arr[i]-%d\t freq[i]-%d\t i-%d\n",arr[i],freq[i],i);
}
int length = function(arr, n);
printf("%d\n", length);
return 0;
}
change the code to print also the indices of the elements of the longest subsequence
|
103d7ac035e2ec5fc4966acba55b029e
|
{
"intermediate": 0.31522470712661743,
"beginner": 0.4208649694919586,
"expert": 0.2639103829860687
}
|
41,114
|
How to have excellent problem solving abilities
|
4dc1c0f85e66761a248d638b551abbfd
|
{
"intermediate": 0.47872194647789,
"beginner": 0.1873134821653366,
"expert": 0.33396458625793457
}
|
41,115
|
#include <stdio.h>
#include <stdlib.h>
// struct node
// {
// int data;
// int index;
// struct node *next;
// };
int func(int arr[], int *index, int n)
{
int freq[n];
int maxindex = 0;
int max = 1;
int prev[n];
for (int i = 0; i < n; ++i)
{
freq[i]=1;
prev[i]=-1;
for (int j=0;j<i;++j)
{
if ((arr[j]==arr[i]-1)&&(freq[j]+1>freq[i]))
{
freq[i]=freq[j]+1;
prev[i]=j;
if(freq[i]>max)
{
max=freq[i];
maxindex=i;
}
}
}
}
int j=maxindex;
for (int i=max-1;i>=0;i--)
{
index[i]=j;
j=prev[j];
}
return max;
}
int main()
{
int n;
scanf("%d", &n);
int arr[n];
//int freq[n];
int* index=(int*)malloc(n*sizeof(int));
// struct node *head = NULL;
// struct node *temp = head;
for (int i = 0; i < n; i++)
{
//int data;
scanf("%d", &arr[i]);
//freq[(arr[i]%n)]=i;
//printf("arr[i]-%d\t freq[i]-%d\t i-%d\n",arr[i],freq[i],i);
}
int length=func(arr,index,n);
printf("%d\n",length);
for(int i=0;i<length;i++)
{
printf("%d ",index[i]);
}
free(index);
// qsort((void*)arr, size, sizeof(arr[0]), comparator);
return 0;
}
time limit exeeded
|
deb85b8deef50ac454aad911dab03fc2
|
{
"intermediate": 0.3432883322238922,
"beginner": 0.4157068431377411,
"expert": 0.2410047948360443
}
|
41,116
|
How to get hostname by mac addres using powershell command. Write only command
|
7cfa575479c33f42523ac240a5aa9b5a
|
{
"intermediate": 0.3457479774951935,
"beginner": 0.270687073469162,
"expert": 0.3835649788379669
}
|
41,117
|
in the below given code i am getting the follwoing error please rectify the error
WARNING:tensorflow:Gradients do not exist for variables ['actor_critic/dense_2/kernel:0', 'actor_critic/dense_2/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?
WARNING:tensorflow:Gradients do not exist for variables ['actor_critic/dense_2/kernel:0', 'actor_critic/dense_2/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?
# Define the neural network architecture for the actor and critic
class ActorCritic(tf.keras.Model):
def __init__(self, n_actions):
super(ActorCritic, self).__init__()
self.dense1 = Dense(64, activation='relu')
self.policy = Dense(n_actions, activation='softmax')
self.value = Dense(1, activation=None)
def call(self, state):
x = self.dense1(state)
policy = self.policy(x)
value = self.value(x)
return policy, value
# PPO Agent
class PPOAgent:
def __init__(self, input_dims, alpha=0.0003, beta=0.0003, gamma=0.99, n_actions=13):
self.actor_critic = ActorCritic(n_actions)
self.actor_critic.compile(optimizer=Adam(learning_rate=alpha))
self.gamma = gamma
self.n_actions = n_actions
self.action_space = np.arange(n_actions)
def choose_action(self, input_observation):
state = tf.convert_to_tensor([input_observation], dtype=tf.float32)
probabilities, _ = self.actor_critic(state)
action_probs = tfp.distributions.Categorical(probs=probabilities)
action = action_probs.sample()
return action.numpy()[0]
def learn(self, state, action, reward, next_state, done):
state = tf.convert_to_tensor([state], dtype=tf.float32)
next_state = tf.convert_to_tensor([next_state], dtype=tf.float32)
reward = tf.convert_to_tensor(reward, dtype=tf.float32)
with tf.GradientTape(persistent=True) as tape:
state_value, _ = self.actor_critic(state)
next_state_value, _ = self.actor_critic(next_state)
state_value = tf.squeeze(state_value)
next_state_value = tf.squeeze(next_state_value)
critic_loss = tf.math.square(reward + self.gamma * next_state_value * (1 - int(done)) - state_value)
probabilities, _ = self.actor_critic(state)
action_probs = tfp.distributions.Categorical(probs=probabilities)
log_probs = action_probs.log_prob(action)
ratios = tf.exp(log_probs - action_probs.entropy())
actor_loss = -tf.math.minimum(ratios * reward, tf.clip_by_value(ratios, 1.0 - 0.2, 1.0 + 0.2) * reward)
total_loss = critic_loss + actor_loss
gradient_actor = tape.gradient(total_loss, self.actor_critic.trainable_variables)
self.actor_critic.optimizer.apply_gradients(zip(gradient_actor, self.actor_critic.trainable_variables))
# Define the values for server_address, username, and password
server_address = 'xxxx'
username = 'xxxx'
password = 'xxxx'
# Initialize environment and agent
env = CustomEnvironment(server_address, username, password)
# Determine the value of 'n' for the 'input_dims'
n = len(bounds_low)
agent = PPOAgent(input_dims=n, n_actions=13)
# Training loop
n_episodes = 1000
for episode in range(n_episodes):
done = False
input_observation, perf_observation = env.reset() # Get both states
episode_reward = 0
while not done:
action = agent.choose_action(input_observation)
next_input_observation, current_perf_observation, reward, done, _ = env.step(action)
agent.learn(input_observation, action, reward, next_input_observation, done)
episode_reward += reward
input_observation = next_input_observation
print(f"Episode: {episode + 1}, Reward: {episode_reward}")
logging.info(f"Episode: {episode + 1}, Reward: {episode_reward}")
if done:
print("Mean reward reached 0. Training terminated.")
break
env.close()
|
2184b257e370ae70a1bfbd652af53892
|
{
"intermediate": 0.34622639417648315,
"beginner": 0.4148705303668976,
"expert": 0.23890306055545807
}
|
41,118
|
How to get hostname by mac addres using powershell command. dont use any coding formatting
|
e96b15afb9cd334a5ebb9fd470a3b9b2
|
{
"intermediate": 0.4168654978275299,
"beginner": 0.2716144621372223,
"expert": 0.3115200698375702
}
|
41,119
|
X:1
T:
C:
M:4/4
L:1/8
K:Cmaj
Q:1/4=90 Generate jay chou , created tune in ABC notation with a full structure, including an A and a B part (common in modern R&B style tunes).
|
c9f8185af1e0188b7e329dd6c13e667e
|
{
"intermediate": 0.3455547094345093,
"beginner": 0.28492477536201477,
"expert": 0.36952054500579834
}
|
41,120
|
How to get hostname by mac addres using powershell command. Assume that the network in question has dhcp configured. dont use any coding formatting.
|
ed83ee0537b29c349f8e026b56296ef0
|
{
"intermediate": 0.40122857689857483,
"beginner": 0.32718509435653687,
"expert": 0.2715863883495331
}
|
41,121
|
with discord bot ,how to handle the lightbulb.messagecommand? get the image attachment
|
762a63b44991cb6dbdf069e739cacfc4
|
{
"intermediate": 0.33891379833221436,
"beginner": 0.2745691239833832,
"expert": 0.3865170478820801
}
|
41,122
|
Write function on C# which converts string with float number to float
|
ecf771cc7fbcf8d5dca94a3b77bb40df
|
{
"intermediate": 0.3829217553138733,
"beginner": 0.30002570152282715,
"expert": 0.3170524835586548
}
|
41,123
|
How to connect to wi-fi network using powershell
|
02cd378f2aa086ab2d17a55eba34ad19
|
{
"intermediate": 0.24151751399040222,
"beginner": 0.24851205945014954,
"expert": 0.5099704265594482
}
|
41,124
|
How to import Wi Fi profile on windows for all users?
|
cb05d282866c713fd20cc33e5cff55c8
|
{
"intermediate": 0.47584325075149536,
"beginner": 0.13264912366867065,
"expert": 0.3915075659751892
}
|
41,125
|
Hi Please be a SAPUI5 and SAP BTP expert.
|
6fb44f4b62092387df921facf733e2b1
|
{
"intermediate": 0.4255317449569702,
"beginner": 0.16585247218608856,
"expert": 0.40861576795578003
}
|
41,126
|
javascript for creating new div element from a button
|
cac06d4829e364b0277448ab24409850
|
{
"intermediate": 0.39893558621406555,
"beginner": 0.2696254849433899,
"expert": 0.33143895864486694
}
|
41,127
|
i want to create a flutterflow complete app with functional database and business logic I need your help. Its a logistics and business super app. We have users who can still be customers and service providers on the super app called dropy. So a shop owner can still switch accounts and order items from a shop within dropy and also a logistics provider can do the same. Our Dropy Super app has shops, parcels, rides, water, garbage, emergencies, logistics and more. For shops we have a unique idea, shop owners get to create shops, and add required details to make their shop usable. select which shop category they belong, add products and all product details including advanced product details like item color whenever necessary. We support different kinds of shop business models. Shops that sell items and those that offer services. For now only laundry and dispenser bottle refillers fall in that category. For shops that sell items shop owners create their own catalog of items with their respective desired menus for product assortment purposes. Those products have unique IDs for the purposes of having a QR tag which we need stored so that shop owners can share those qr codes of the products for purposes of marketing and also a new feature we have on our app called quick pay, where the product QR codes can be printed from the app then stuck on physical items to allow customers to scan and par or scan and add to cart and checkout if its multiple products from the same shop. The scan and pay is seamless because we have a wallet in which upon scanning the customer can enter a PIN to allow the money to be deducted from their wallet. We have a wallet for all the users but also the service providers have their respective wallets so that they don’t get mixed up having business and personal money mixed up. For the shops, customers can search using a strong search engine to find items from our backend so that they can add to their carts. 
When a customer finds a desired product, they add on their cart and place the order to the shop first before the purchase. there is a place order button on the cart page that remains inactive after being clicked until the shop can process the order. Placing an order means the shop will see an incoming order from a customer and will then verify if the product is in stock not just online but also in the physical store since you realize shop owners are not just limited to selling using Dropy. Some shops have a physical presence where customers walk in and maybe they end up buying the products that are showing as available on the Dropy app but are not available on the physical store. This way the shop owner accepts the order by responding using a checklist to check the items the customer wanted. If the item is not available at the shop the shop owner clicks a button for out of stock, that way customer does not pay for something unavailable. When a product is in stock the shop owner clicks on process order and now the customer will see the inactive button that he clicked on to place an order turned to clickable and changes the text to checkout. If the shop owner does not have the product the customer will see the items as out of stock and the checkout button becomes unclickable since there’s nothing to checkout. If a product has been checked out, the shop owner does not prepare the item until the payment has been confirmed. So the customer goes to the payment screen where he tops up money to his wallet using Visa or any other payment provider, Once money has been topped up, it can then be deducted and that money goes to an escrow account awaiting the order to be delivered for the money to be given to the shop owner and delivery guy. On the cart, there is still a place like a plus icon that is for adding a delivery note. 
customers can shop from multiple shops but we make sure that the money in their wallet for the specific transaction is held for that specific transaction so that we don’t have a scenario where the customer orders items and does not have enough money in his wallet to facilitate the payment. For the wallet, there is a top-up, withdrawal, and transaction history. After the customer has paid successfully, the shop owner can then click on start after he enters the time he needs in minutes to prepare the order via an input field. When the shop owner is ready before the timer has finished the countdown they can still press the order ready button that now sends a notification to a nearby delivery guy to come pick up the order. Because a shop has multiple orders they are handling for multiple customers we have small rectangular cards for each order with a colored border that changes from green to red if the order has been delayed in terms of processing. that way the shop owner can adjust the timer and this notifies the customer as well so that the real-time tracking becomes effective. We have in-app notifications and also a screen where the customer can view the progress of their orders. Once the order has been fully processed by the shop owner, they press the order ready button which by default generates a QR code for that order when the delivery person arrives, they come and scan the QR to signify that they have picked up the order from the shop and now its en route to the customer. Once the rider arrives at the customer the customer now has to scan the QR from the rider’s phone or we also have a 4-digit SMS code that we generate as well for the customer to complete the delivery and signify that they have collected the order from the rider or delivery guy. For the rider on their end, when they receive incoming orders, they can choose to accept or decline. When they click decline, that request is given to another rider in the vicinity or nearby. 
When the rider clicks accept, they are given a route on where they can go pick up the item from the shop, and after they have scanned the QR at the shop they see the line on the map that draws a path to the customers’ destination. The first path was dotted to the shop then the one to the customer is a continuous bold line. He also gets the estimated time of arrival details and customer details and also the shop details on the screen so that he can either message using dropy messaging to the shop at that particular stage or call respectively. Sometimes the delivery person gets into a breakdown with the order. He can decide to cancel the delivery and the order is given to another rider which still follows the same procedure and reallocates that order to the new delivery guy. This way the new delivery guy is the one to be paid after the successful order process. Once the customer has scanned the order they can review the shop and the delivery person since we have a rating system. In case the customer has an issue with the order there is a get help button where they raise a dispute with the shop owner and the shop owner can begin the process of resolving the dispute. Dropy management only intervenes when the shop owner and customer are not in agreement. Give me a step by step complete guide like someone who has never used flutterflow before so that i dont mess things up. This include all cofigurations that i need. there are some templates and cards built into flutterflow so i can use those ones because i have my own design but i will bring in my design later once i get a hold of things
|
5cb27e4fca6f953b5d05fffc55ab7f11
|
{
"intermediate": 0.424161434173584,
"beginner": 0.40981876850128174,
"expert": 0.16601981222629547
}
|
41,128
|
You are a CSS and Accessibility expert. I have some questions regarding CSS properties with respect to accessibility.
|
76493147ed2d1a4ac1319e57859c64db
|
{
"intermediate": 0.29741016030311584,
"beginner": 0.42492276430130005,
"expert": 0.2776670753955841
}
|
41,129
|
How To Load Hugging Face Dataset
|
18b2eef2c56cc67e8b05b50948632023
|
{
"intermediate": 0.2832101285457611,
"beginner": 0.15415169298648834,
"expert": 0.5626382231712341
}
|
41,130
|
Given a singly linked list of N nodes.
The task is to find the middle of the linked list. For example. if the linked list is
1->2->3->4->5, then the middle node of the list is 3.
If there are two middle nodes(in case. when N is even), print the second middle element.
For example. if the linked list given is then the middle node of the list is 4.
|
b0e418dbdefe60b378ff76f5198c3425
|
{
"intermediate": 0.42075011134147644,
"beginner": 0.2552177309989929,
"expert": 0.32403215765953064
}
|
41,131
|
optimize this mysql query for the maximum performance and avoid redundancy
WITH result AS
(
(
WITH t1 AS (
SELECT
tar.analysis_id,
NULL AS analysis_err_id,
COALESCE(trim(REGEXP_REPLACE(tdui.device_name, '[[:space:]]+', ' ')), '') AS d_device_name,
COALESCE(trim(REGEXP_REPLACE(tcui.device_name, '[[:space:]]+', ' ')), '') AS c_device_name,
tcri.recipe_no AS recipe_no,
COALESCE(trim(REGEXP_REPLACE(tar.measure_hash, '[[:space:]]+', ' ')), '') AS measure_hash,
pj.carrier_id AS carrier_id,
mpp.lot_id,
COALESCE(trim(REGEXP_REPLACE(tad.wafer_no, '[[:space:]]+', ' ')), '') AS wafer_no,
tad.start_date as add_date,
COALESCE(trim(REGEXP_REPLACE(tcri.remark, '[[:space:]]+', ' ')), '') AS remark,
tar.result,
twi.defect_num,
COALESCE(trim(REGEXP_REPLACE(tar.pm_time, '[[:space:]]+', ' ')), '') AS pm_time,
COALESCE(trim(REGEXP_REPLACE(tar.recovery_time, '[[:space:]]+', ' ')), '') AS recovery_time,
COALESCE(trim(REGEXP_REPLACE(tad.mtbc, '[[:space:]]+', ' ')), '') AS mtbc,
COALESCE(trim(REGEXP_REPLACE(tad.mwbc, '[[:space:]]+', ' ')), '') AS mwbc,
date_format(twi.ls_result_timestamp,'%Y-%m-%d %T') AS ls_result_timestamp,
twi.seqno,
COALESCE(trim(REGEXP_REPLACE(tar.memo, '[[:space:]]+', ' ')), '') AS memo,
me.error_ref_key,
tar.error_param,
tar.error_id
FROM
dfa_new.t_analysis_result AS tar
INNER JOIN dfa_new.t_project_history AS tph
ON tar.pj_history_id = tph.pj_history_id
LEFT JOIN dfa_new.m_error AS me
ON me.error_id = substring_index(tar.error_id, ',', 1)
LEFT JOIN dfa_new.t_asm_detail tad
ON tar.asm_id = tad.asm_id
LEFT JOIN dfa_new.t_chamber_unique_info AS tcui
ON tad.chamber_id = tcui.chamber_unique_id
LEFT JOIN dfa_new.t_chamber_recipe_info AS tcri
ON tad.recipe_no = tcri.recipe_no
LEFT JOIN dfa_new.t_pcsjb_info AS pj
ON
(
tcui.device_unique_id=pj.device_unique_id
AND SUBSTRING(tad.process_recipe_name, LOCATE(' ',
tad.process_recipe_name)+1) = pj.seq_rcp_name
AND tad.start_date >= pj.pj_start_time
AND tad.end_date <= pj.pj_end_time
)
LEFT JOIN dfa_new.t_device_unique_info AS tdui
ON tcui.device_unique_id = tdui.device_unique_id
LEFT JOIN dfa_new.t_wafer_info twi
ON tar.analysis_id = twi.analysis_id
LEFT JOIN dfa_new.t_mapping AS mpp
ON
(
tar.asm_id = mpp.asm_id
AND tar.measure_hash = mpp.measure_hash
)
WHERE tph.project_id = 3
-- AND tar.add_date = '2023-02-15 16:40:38'
)
SELECT t1.*, t3.edx_timestamp
FROM t1
LEFT JOIN (
WITH t2 AS (
SELECT
ROW_Number() OVER(
PARTITION BY t_defect_detail.wafer_seqno
ORDER BY t_defect_detail.defect_id ASC
) AS row_num,
t_wafer_info.analysis_id,
date_format(t_defect_detail.edx_timestamp,'%Y-%m-%d %T') AS edx_timestamp
FROM
dfa_new.t_defect_detail
INNER JOIN dfa_new.t_wafer_info
ON t_defect_detail.wafer_seqno = t_wafer_info.seqno
WHERE t_wafer_info.seqno IN (SELECT seqno FROM t1)
)
SELECT *
FROM t2
WHERE t2.row_num = 1
) AS t3
ON t1.analysis_id = t3.analysis_id
)
UNION
(
SELECT
NULL AS analysis_id,
tae.analysis_err_id,
COALESCE(trim(REGEXP_REPLACE(tdui.device_name, '[[:space:]]+', ' ')), '') AS d_device_name,
COALESCE(trim(REGEXP_REPLACE(tcui.device_name, '[[:space:]]+', ' ')), '') AS c_device_name,
null AS recipe_no,
null AS measure_hash,
null AS carrier_id,
null AS lot_id,
null AS wafer_no,
null AS add_date,
null AS remark,
null AS result,
null AS defect_num,
null AS pm_time,
null AS recovery_time,
null AS mtbc,
null AS mwbc,
null AS ls_result_timestamp,
null AS seqno,
null AS memo,
me.error_ref_key,
tae.error_param,
tae.error_id,
null AS edx_timestamp
FROM
dfa_new.t_analysis_error AS tae
INNER JOIN dfa_new.t_project_history AS tph
ON tae.pj_history_id = tph.pj_history_id
LEFT JOIN dfa_new.m_error AS me
ON me.error_id = substring_index(tae.error_id, ',', 1)
LEFT JOIN dfa_new.t_chamber_unique_info AS tcui
ON tae.chamber_id = tcui.chamber_unique_id
LEFT JOIN dfa_new.t_device_unique_info AS tdui
ON tcui.device_unique_id = tdui.device_unique_id
WHERE tph.project_id = 3
-- AND tae.add_date = '2023-02-15 16:40:38'
)
)
SELECT
*,
CONCAT(COALESCE(analysis_err_id, ''), '-', COALESCE(analysis_id, '')) AS id
FROM
result
ORDER BY
add_date DESC,
d_device_name,
c_device_name,
recipe_no,
carrier_id,
wafer_no,
analysis_id
LIMIT
0, 20
|
b22055818074aff40cbd119e126855b2
|
{
"intermediate": 0.3301618695259094,
"beginner": 0.36651158332824707,
"expert": 0.3033265769481659
}
|
41,132
|
Give PlantUML code For:
1.Draw a class diagram for a travel booking website
a.Identify and draw classes involved.
b.Identify and mention attributes and properties of classes.
c.Identify and draw relationships between classes
Hints:
For class diagrams, identify the entities (or actors) that interact with the booking system – it can be a user making a booking, and the software that the user interacts with is also an entity (or actor). This software will consist of several entities that make it up. All entities can be mapped to classes. Give classes some attributes and operations as you like to.
|
b7b3525a7330682bdb478372d5376428
|
{
"intermediate": 0.25011956691741943,
"beginner": 0.5249060988426208,
"expert": 0.22497433423995972
}
|
41,133
|
.shader
// Made with Amplify Shader Editor v1.9.0.2
// Available at the Unity Asset Store - http://u3d.as/y3X
Shader "S_FX_Screen_01"
{
Properties
{
_Color("Color", Color) = (1,1,1,1)
_MainTex("MainTex", 2D) = "white" {}
_TilingOffset("TilingOffset", Vector) = (1,1,0,0)
_UV_Speed("UV_Speed", Vector) = (0,0,0,0)
_Mask("Mask", 2D) = "white" {}
_MaskIntensity("MaskIntensity", Float) = 1
_Alpha("Alpha", Range( 0 , 1)) = 1
}
SubShader
{
Tags { "RenderType"="Opaque" "Queue"="Transparent" }
LOD 100
CGINCLUDE
#pragma target 5.0
ENDCG
Blend SrcAlpha OneMinusSrcAlpha
AlphaToMask Off
Cull Off
ColorMask RGBA
ZWrite Off
ZTest LEqual
Offset 0 , 0
GrabPass{ }
Pass
{
Name "Unlit"
Tags { "LightMode"="ForwardBase" }
CGPROGRAM
#if defined(UNITY_STEREO_INSTANCING_ENABLED) || defined(UNITY_STEREO_MULTIVIEW_ENABLED)
#define ASE_DECLARE_SCREENSPACE_TEXTURE(tex) UNITY_DECLARE_SCREENSPACE_TEXTURE(tex);
#else
#define ASE_DECLARE_SCREENSPACE_TEXTURE(tex) UNITY_DECLARE_SCREENSPACE_TEXTURE(tex)
#endif
#ifndef UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX
//only defining to not throw compilation error over Unity 5.5
#define UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input)
#endif
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_instancing
#include "UnityCG.cginc"
#include "UnityShaderVariables.cginc"
#define ASE_NEEDS_FRAG_COLOR
struct appdata
{
float4 vertex : POSITION;
float4 color : COLOR;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float4 vertex : SV_POSITION;
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
float3 worldPos : TEXCOORD0;
#endif
float4 ase_texcoord1 : TEXCOORD1;
float4 ase_color : COLOR;
UNITY_VERTEX_INPUT_INSTANCE_ID
UNITY_VERTEX_OUTPUT_STEREO
};
ASE_DECLARE_SCREENSPACE_TEXTURE( _GrabTexture )
uniform sampler2D _MainTex;
uniform half4 _UV_Speed;
uniform half4 _TilingOffset;
uniform sampler2D _Mask;
uniform half _MaskIntensity;
uniform half4 _Color;
uniform half _Alpha;
v2f vert ( appdata v )
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
UNITY_TRANSFER_INSTANCE_ID(v, o);
float4 ase_clipPos = UnityObjectToClipPos(v.vertex);
float4 screenPos = ComputeScreenPos(ase_clipPos);
o.ase_texcoord1 = screenPos;
o.ase_color = v.color;
float3 vertexValue = float3(0, 0, 0);
#if ASE_ABSOLUTE_VERTEX_POS
vertexValue = v.vertex.xyz;
#endif
vertexValue = vertexValue;
#if ASE_ABSOLUTE_VERTEX_POS
v.vertex.xyz = vertexValue;
#else
v.vertex.xyz += vertexValue;
#endif
o.vertex = UnityObjectToClipPos(v.vertex);
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
#endif
return o;
}
fixed4 frag (v2f i ) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(i);
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
fixed4 finalColor;
#ifdef ASE_NEEDS_FRAG_WORLD_POSITION
float3 WorldPosition = i.worldPos;
#endif
float4 screenPos = i.ase_texcoord1;
float4 ase_screenPosNorm = screenPos / screenPos.w;
ase_screenPosNorm.z = ( UNITY_NEAR_CLIP_VALUE >= 0 ) ? ase_screenPosNorm.z : ase_screenPosNorm.z * 0.5 + 0.5;
half2 temp_output_46_0 = (ase_screenPosNorm).xy;
half4 screenColor49 = UNITY_SAMPLE_SCREENSPACE_TEXTURE(_GrabTexture,temp_output_46_0);
half mulTime23 = _Time.y * _UV_Speed.z;
half2 appendResult19 = (half2(_UV_Speed.x , _UV_Speed.y));
half2 CenteredUV15_g2 = ( temp_output_46_0 - float2( 0.5,0.5 ) );
half2 break17_g2 = CenteredUV15_g2;
half2 appendResult23_g2 = (half2(( length( CenteredUV15_g2 ) * 1.0 * 2.0 ) , ( atan2( break17_g2.x , break17_g2.y ) * ( 1.0 / 6.28318548202515 ) * 1.0 )));
half2 appendResult15 = (half2(_TilingOffset.x , _TilingOffset.y));
half2 appendResult16 = (half2(_TilingOffset.z , _TilingOffset.w));
half2 panner17 = ( ( mulTime23 + _UV_Speed.w ) * appendResult19 + (appendResult23_g2*appendResult15 + appendResult16));
finalColor = ( ( screenColor49 + ( i.ase_color * ( ( tex2D( _MainTex, panner17 ).r * ( tex2D( _Mask, temp_output_46_0 ).r * _MaskIntensity ) ) * _Color ) ) ) * i.ase_color.a * _Alpha );
return finalColor;
}
ENDCG
}
}
CustomEditor "ASEMaterialInspector"
Fallback Off
}
/*ASEBEGIN
Version=19002
0;741;1634;770;948.6564;973.3687;1;True;False
Node;AmplifyShaderEditor.ScreenPosInputsNode;10;-3411.65,-863.6804;Float;False;0;False;0;5;FLOAT4;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.ComponentMaskNode;46;-3123.942,-872.275;Inherit;False;True;True;False;False;1;0;FLOAT4;0,0,0,0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.Vector4Node;18;-2375.06,-321.619;Inherit;False;Property;_UV_Speed;UV_Speed;3;0;Create;True;0;0;0;False;0;False;0,0,0,0;2.39,0.11,1,0;0;5;FLOAT4;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.Vector4Node;14;-2622.881,-505.7815;Inherit;False;Property;_TilingOffset;TilingOffset;2;0;Create;True;0;0;0;False;0;False;1,1,0,0;-0.37,8,0,0;0;5;FLOAT4;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.DynamicAppendNode;15;-2284.233,-572.3533;Inherit;False;FLOAT2;4;0;FLOAT;0;False;1;FLOAT;0;False;2;FLOAT;0;False;3;FLOAT;0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.FunctionNode;27;-2692.593,-762.4178;Inherit;True;Polar Coordinates;-1;;2;7dab8e02884cf104ebefaa2e788e4162;0;4;1;FLOAT2;0,0;False;2;FLOAT2;0.5,0.5;False;3;FLOAT;1;False;4;FLOAT;1;False;1;FLOAT2;0
Node;AmplifyShaderEditor.SimpleTimeNode;23;-2140.779,-240.9574;Inherit;False;1;0;FLOAT;1;False;1;FLOAT;0
Node;AmplifyShaderEditor.DynamicAppendNode;16;-2283.234,-451.3534;Inherit;False;FLOAT2;4;0;FLOAT;0;False;1;FLOAT;0;False;2;FLOAT;0;False;3;FLOAT;0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.ScaleAndOffsetNode;12;-2036.909,-727.6755;Inherit;False;3;0;FLOAT2;0,0;False;1;FLOAT2;1,0;False;2;FLOAT2;0,0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.SimpleAddOpNode;25;-1931.304,-228.0426;Inherit;False;2;2;0;FLOAT;0;False;1;FLOAT;0;False;1;FLOAT;0
Node;AmplifyShaderEditor.DynamicAppendNode;19;-2018.13,-434.0237;Inherit;False;FLOAT2;4;0;FLOAT;0;False;1;FLOAT;0;False;2;FLOAT;0;False;3;FLOAT;0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.WireNode;57;-2888.827,-219.0481;Inherit;False;1;0;FLOAT2;0,0;False;1;FLOAT2;0
Node;AmplifyShaderEditor.PannerNode;17;-1624.402,-600.6139;Inherit;False;3;0;FLOAT2;0,0;False;2;FLOAT2;0,0;False;1;FLOAT;1;False;1;FLOAT2;0
Node;AmplifyShaderEditor.SamplerNode;51;-1515.335,-180.8605;Inherit;True;Property;_Mask;Mask;4;0;Create;True;0;0;0;False;0;False;-1;None;f0787021b74e10f4da12b361e2b30423;True;0;False;white;Auto;False;Object;-1;Auto;Texture2D;8;0;SAMPLER2D;;False;1;FLOAT2;0,0;False;2;FLOAT;0;False;3;FLOAT2;0,0;False;4;FLOAT2;0,0;False;5;FLOAT;1;False;6;FLOAT;0;False;7;SAMPLERSTATE;;False;5;COLOR;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.RangedFloatNode;54;-1377.297,65.16128;Inherit;False;Property;_MaskIntensity;MaskIntensity;5;0;Create;True;0;0;0;False;0;False;1;4.8;0;0;0;1;FLOAT;0
Node;AmplifyShaderEditor.SamplerNode;1;-1257.771,-452.8065;Inherit;True;Property;_MainTex;MainTex;1;0;Create;True;0;0;0;False;0;False;-1;None;c788c9ae98684a745bf8575fa1b1a310;True;0;False;white;Auto;False;Object;-1;Auto;Texture2D;8;0;SAMPLER2D;;False;1;FLOAT2;0,0;False;2;FLOAT;0;False;3;FLOAT2;0,0;False;4;FLOAT2;0,0;False;5;FLOAT;1;False;6;FLOAT;0;False;7;SAMPLERSTATE;;False;5;COLOR;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.SimpleMultiplyOpNode;52;-1064.799,-165.8595;Inherit;False;2;2;0;FLOAT;0;False;1;FLOAT;0;False;1;FLOAT;0
Node;AmplifyShaderEditor.ColorNode;9;-913.5257,-128.0503;Inherit;False;Property;_Color;Color;0;0;Create;True;0;0;0;False;0;False;1,1,1,1;1,1,1,1;True;0;5;COLOR;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.SimpleMultiplyOpNode;50;-865.9789,-430.4513;Inherit;False;2;2;0;FLOAT;0;False;1;FLOAT;0;False;1;FLOAT;0
Node;AmplifyShaderEditor.SimpleMultiplyOpNode;48;-656.1394,-415.2004;Inherit;False;2;2;0;FLOAT;0;False;1;COLOR;0,0,0,0;False;1;COLOR;0
Node;AmplifyShaderEditor.VertexColorNode;55;-537.3065,-252.4023;Inherit;False;0;5;COLOR;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.SimpleMultiplyOpNode;56;-279.8831,-435.896;Inherit;False;2;2;0;COLOR;0,0,0,0;False;1;COLOR;0,0,0,0;False;1;COLOR;0
Node;AmplifyShaderEditor.ScreenColorNode;49;-1740.885,-928.5324;Inherit;False;Global;_GrabScreen0;Grab Screen 0;4;0;Create;True;0;0;0;False;0;False;Object;-1;False;False;False;False;2;0;FLOAT2;0,0;False;1;FLOAT;0;False;5;COLOR;0;FLOAT;1;FLOAT;2;FLOAT;3;FLOAT;4
Node;AmplifyShaderEditor.SimpleAddOpNode;47;-124.9826,-921.6413;Inherit;True;2;2;0;COLOR;0,0,0,0;False;1;COLOR;0,0,0,0;False;1;COLOR;0
Node;AmplifyShaderEditor.RangedFloatNode;59;37.6367,-518.9747;Inherit;False;Property;_Alpha;Alpha;6;0;Create;True;0;0;0;False;0;False;1;1;0;1;0;1;FLOAT;0
Node;AmplifyShaderEditor.SimpleMultiplyOpNode;58;418.1313,-851.847;Inherit;False;3;3;0;COLOR;0,0,0,0;False;1;FLOAT;0;False;2;FLOAT;0;False;1;COLOR;0
Node;AmplifyShaderEditor.TemplateMultiPassMasterNode;0;685.6207,-875.363;Half;False;True;-1;2;ASEMaterialInspector;100;3;S_FX_Screen_01;0770190933193b94aaa3065e307002fa;True;Unlit;0;0;Unlit;2;True;True;2;5;False;;10;False;;0;1;False;;0;False;;True;0;False;;0;False;;False;False;False;False;False;False;False;False;False;True;0;False;;True;True;2;False;;False;True;True;True;True;True;0;False;;False;False;False;False;False;False;False;True;False;255;False;;255;False;;255;False;;7;False;;1;False;;1;False;;1;False;;7;False;;1;False;;1;False;;1;False;;True;True;2;False;;True;3;False;;True;True;0;False;;0;False;;True;2;RenderType=Opaque=RenderType;Queue=Transparent=Queue=0;True;7;False;0;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;False;True;1;LightMode=ForwardBase;False;False;0;;0;0;Standard;1;Vertex Position,InvertActionOnDeselection;1;0;0;1;True;False;;False;0
WireConnection;46;0;10;0
WireConnection;15;0;14;1
WireConnection;15;1;14;2
WireConnection;27;1;46;0
WireConnection;23;0;18;3
WireConnection;16;0;14;3
WireConnection;16;1;14;4
WireConnection;12;0;27;0
WireConnection;12;1;15;0
WireConnection;12;2;16;0
WireConnection;25;0;23;0
WireConnection;25;1;18;4
WireConnection;19;0;18;1
WireConnection;19;1;18;2
WireConnection;57;0;46;0
WireConnection;17;0;12;0
WireConnection;17;2;19;0
WireConnection;17;1;25;0
WireConnection;51;1;57;0
WireConnection;1;1;17;0
WireConnection;52;0;51;1
WireConnection;52;1;54;0
WireConnection;50;0;1;1
WireConnection;50;1;52;0
WireConnection;48;0;50;0
WireConnection;48;1;9;0
WireConnection;56;0;55;0
WireConnection;56;1;48;0
WireConnection;49;0;46;0
WireConnection;47;0;49;0
WireConnection;47;1;56;0
WireConnection;58;0;47;0
WireConnection;58;1;55;4
WireConnection;58;2;59;0
WireConnection;0;0;58;0
ASEEND*/
//CHKSM=2C4622C838D2E2B5FE065FCD10F5515E70EDE67F
Shader properties can't be added to this global property sheet. Trying to add _GrabTexture_ST (type 2 count 1)
UnityEngine.GUIUtility:ProcessEvent (int,intptr,bool&)。帮我修复下shader的错误
|
52a6e8d370d3f8f2cc8b53e9bb1a3329
|
{
"intermediate": 0.2961878180503845,
"beginner": 0.4549490809440613,
"expert": 0.2488631308078766
}
|
41,134
|
give Plant UML code for: Draw a sequence diagram (picking any particular use case) for a travel booking website
a.Identify and draw objects involved in chosen use case
b.Identify and mention messages exchanged between objects identified in a.
c. Identify and draw timelines of objects identified in a.
Hint:
For sequence diagrams, pick any use case in your booking system, say ‘book tickets’. Identify the entities (classes) involved in that use case, then draw objects corresponding to those classes. Be logical (or imaginative – whatever floats your boat) in creating some message calls (function calls) between objects and represent lifeline.
|
1e8b6b86cb301c1e7b69048b4bf33578
|
{
"intermediate": 0.21057787537574768,
"beginner": 0.6605043411254883,
"expert": 0.12891772389411926
}
|
41,135
|
Can you write uml code for the following:
Draw a class diagram for a travel booking website (10 M)
a. Identify and draw classes involved (4 M)
b. Identify and mention attributes and properties of classes (2 M)
c. Identify and draw relationships between classes (4 M)
Hints:
a. For class diagrams, identify the entities (or actors) that interact with the booking system – it can be a user making a booking, and the software that the user interacts with is also an entity (or actor). This software will consist of several entities that make it up. All entities can be mapped to classes. Give classes some attributes and operations as you like to. TIP: Check the class diagram above for more on classes, and relationships
It should have booking for hotel, transport or an event
b. For sequence diagrams, pick any use case in your booking system
|
3f66ca14800b19bfb62fdc9186b5f4fd
|
{
"intermediate": 0.12099909782409668,
"beginner": 0.7811812162399292,
"expert": 0.09781971573829651
}
|
41,136
|
give Plant UML code for: Draw a sequence diagram (picking any particular use case) for a travel booking website
a.Identify and draw objects involved in chosen use case
b.Identify and mention messages exchanged between objects identified in a.
c. Identify and draw timelines of objects identified in a.
|
688dacb9e27e01df99d8f7984b34d51f
|
{
"intermediate": 0.4685056209564209,
"beginner": 0.26964589953422546,
"expert": 0.26184841990470886
}
|
41,137
|
provide me the complete updated code which including everything to my requirement and replacement to the below error code:
Error:
WARNING:tensorflow:Gradients do not exist for variables ['actor_critic/dense_2/kernel:0', 'actor_critic/dense_2/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?
WARNING:tensorflow:Gradients do not exist for variables ['actor_critic/dense_2/kernel:0', 'actor_critic/dense_2/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?
# Define the neural network architecture for the actor and critic
class ActorCritic(tf.keras.Model):
def __init__(self, n_actions):
super(ActorCritic, self).__init__()
self.dense1 = Dense(64, activation='relu')
self.policy = Dense(n_actions, activation='softmax')
self.value = Dense(1, activation=None)
def call(self, state):
x = self.dense1(state)
policy = self.policy(x)
value = self.value(x)
return policy, value
# PPO Agent
class PPOAgent:
def __init__(self, input_dims, alpha=0.0003, beta=0.0003, gamma=0.99, n_actions=13):
self.actor_critic = ActorCritic(n_actions)
self.actor_critic.compile(optimizer=Adam(learning_rate=alpha))
self.gamma = gamma
self.n_actions = n_actions
self.action_space = np.arange(n_actions)
def choose_action(self, input_observation):
state = tf.convert_to_tensor([input_observation], dtype=tf.float32)
probabilities, _ = self.actor_critic(state)
action_probs = tfp.distributions.Categorical(probs=probabilities)
action = action_probs.sample()
return action.numpy()[0]
def learn(self, state, action, reward, next_state, done):
state = tf.convert_to_tensor([state], dtype=tf.float32)
next_state = tf.convert_to_tensor([next_state], dtype=tf.float32)
reward = tf.convert_to_tensor(reward, dtype=tf.float32)
with tf.GradientTape(persistent=True) as tape:
state_value, _ = self.actor_critic(state)
next_state_value, _ = self.actor_critic(next_state)
state_value = tf.squeeze(state_value)
next_state_value = tf.squeeze(next_state_value)
critic_loss = tf.math.square(reward + self.gamma * next_state_value * (1 - int(done)) - state_value)
probabilities, _ = self.actor_critic(state)
action_probs = tfp.distributions.Categorical(probs=probabilities)
log_probs = action_probs.log_prob(action)
ratios = tf.exp(log_probs - action_probs.entropy())
actor_loss = -tf.math.minimum(ratios * reward, tf.clip_by_value(ratios, 1.0 - 0.2, 1.0 + 0.2) * reward)
total_loss = critic_loss + actor_loss
gradient_actor = tape.gradient(total_loss, self.actor_critic.trainable_variables)
self.actor_critic.optimizer.apply_gradients(zip(gradient_actor, self.actor_critic.trainable_variables))
# Define the values for server_address, username, and password
server_address = '10.110.5.59'
username = '22phd0696'
password = 'student'
# Initialize environment and agent
env = CustomEnvironment(server_address, username, password)
# Determine the value of 'n' for the 'input_dims'
n = len(bounds_low)
agent = PPOAgent(input_dims=n, n_actions=13)
# Training loop
n_episodes = 1000
for episode in range(n_episodes):
done = False
input_observation, perf_observation = env.reset() # Get both states
episode_reward = 0
while not done:
action = agent.choose_action(input_observation)
next_input_observation, current_perf_observation, reward, done, _ = env.step(action)
agent.learn(input_observation, action, reward, next_input_observation, done)
episode_reward += reward
input_observation = next_input_observation
print(f"Episode: {episode + 1}, Reward: {episode_reward}")
logging.info(f"Episode: {episode + 1}, Reward: {episode_reward}")
if done:
print("Mean reward reached 0. Training terminated.")
break
env.close()
|
b4ed8c461f2012f42c5705ecbd2225ac
|
{
"intermediate": 0.28728073835372925,
"beginner": 0.38933077454566956,
"expert": 0.32338854670524597
}
|
41,138
|
I have a urdf file.
In the context of ros2 using a python launch file, explain the steps to load this URDF in gazebo
|
98660e3bd0483ce15f4bf2ed64549f87
|
{
"intermediate": 0.6644042134284973,
"beginner": 0.1552315652370453,
"expert": 0.1803642213344574
}
|
41,139
|
I have a xacro file of a gp88 robot. generate a luanch file in ros1 to visualize it in gazebo and rviz
|
954f95445c928d24e319e5a84abe126d
|
{
"intermediate": 0.33865129947662354,
"beginner": 0.28111448884010315,
"expert": 0.3802342712879181
}
|
41,140
|
I have a xacro file of a gp88 robot. generate a launchfile in ros1 to visualize it in gazebo
|
ed3108b975b095550c93ab7630c949cf
|
{
"intermediate": 0.36178115010261536,
"beginner": 0.2803817689418793,
"expert": 0.3578370213508606
}
|
41,141
|
File d:\opamp circuits\rl\rl_code8.py:292
env = CustomEnvironment(server_address, username, password)
TypeError: CustomEnvironment() takes no arguments
# Define bounds
bounds_low = np.array([0.18e-6, 0.18e-6, 0.18e-6, 0.18e-6, 0.18e-6, 1e-6, 2.52e-6, 2.16e-6, 32e-6, 13.5e-6, 15e-6, 0.44e-12, 0.8])
bounds_high = np.array([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 12e-6, 28e-6, 24e-6, 100e-6, 100e-6, 30e-6, 3e-12, 1.5])
class CustomEnvironment(gym.Env):
def init(self, server_address, username, password, bounds_low, bounds_high):
# Continuous action space adjustment each parameter has 3 actions
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(len(bounds_low) * 3,), dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=np.inf, shape=(len(bounds_low),), dtype=np.float32)
self.bounds_low = bounds_low
self.bounds_high = bounds_high
self.simulator = None
self.input_parameters = None
self.current_state = None
self.server_address = '10.110.5.59'
self.username = '22phd0696'
self.password = 'student'
self.delta = (self.bounds_high - self.bounds_low) / 100 # Adjusting factor for increment/decrement
# Initialize termination condition variables
self.num_specifications_met = 0
self.num_specifications = 8 # Assuming there are 8 specifications
# Define the values for server_address, username, and password
server_address = '10.110.5.59'
username = '22phd0696'
password = 'student'
# Initialize environment and agent
env = CustomEnvironment(server_address, username, password, bounds_low, bounds_high)
agent = PPOAgent(bounds_low, bounds_high)
|
3414a24d12900483c3cffbb66b98bd58
|
{
"intermediate": 0.39540570974349976,
"beginner": 0.3692989945411682,
"expert": 0.23529532551765442
}
|
41,142
|
hi ther
|
49f75ca3db0c429759ca172d0ceebcfc
|
{
"intermediate": 0.32207369804382324,
"beginner": 0.24952194094657898,
"expert": 0.4284043610095978
}
|
41,143
|
# Remove series with only 0 values
y_cl4 = y_cl4.filter(pl.col('unique_id') != '22028504_US01_8_1108389')
# Display 200 rows in Polars output
pl.Config.set_tbl_rows(200)
# Function to perform the grouping, counting, and sorting of lengths
def group_count_sort(y_cl4, length_threshold=None):
lengths = y_cl4.groupby('unique_id').agg(pl.count().alias('length'))
if length_threshold:
lengths = lengths.filter(pl.col('length') > length_threshold)
counts = lengths.groupby('length').agg(pl.count().alias('count')).sort('length')
return lengths, counts
# Lengths for all series
all_lengths, all_counts = group_count_sort(y_cl4)
print(all_counts)
# Function to filter y_cl4 based on lengths
def filter_and_sort(y_cl4, lengths):
y_cl4_filtered = y_cl4.join(
lengths.select(pl.col('unique_id')),
on='unique_id',
how='semi'
)
return y_cl4_filtered.sort('ds')
# Lengths greater than 31
lengths_over_31, counts_for_over_31 = group_count_sort(y_cl4, 31)
y_cl4_over_31 = filter_and_sort(y_cl4, lengths_over_31)
print(counts_for_over_31)
# Lengths greater than 15
lengths_over_15, counts_for_over_15 = group_count_sort(y_cl4, 15)
y_cl4_over_15 = filter_and_sort(y_cl4, lengths_over_15)
print(counts_for_over_15)
this was used on an ensemble model, # Lengths greater than 29
lengths_over_29, counts_for_over_29 = group_count_sort(y_cl4, 29)
y_cl4_over_29 = filter_and_sort(y_cl4, lengths_over_29)
print(counts_for_over_29)
# Lengths greater than 9
lengths_over_9, counts_for_over_9 = group_count_sort(y_cl4, 9)
y_cl4_over_9 = filter_and_sort(y_cl4, lengths_over_9)
print(counts_for_over_9) this was sused for ensemble model but different datasets from statsforecast import StatsForecast
from statsforecast.models import AutoARIMA, AutoETS, DynamicOptimizedTheta, IMAPA
from statsforecast.utils import ConformalIntervals
import numpy as np
import polars as pl
# Polars option to display all rows
pl.Config.set_tbl_rows(None)
# Initialize the models
models = [
AutoARIMA(season_length=12),
AutoETS(damped=True, season_length=12),
DynamicOptimizedTheta(season_length=12),
IMAPA()
]
# Initialize the StatsForecast model
sf = StatsForecast(models=models, freq='1w', n_jobs=-1)
# Perform cross-validation with a step size of 1 to mimic an expanding window
crossvalidation_df = sf.cross_validation(df=y_cl4_over_29, h=1, step_size=1, n_windows=8, sort_df=True)
# Calculate the ensemble mean
ensemble = crossvalidation_df[['AutoARIMA', 'AutoETS', 'DynamicOptimizedTheta', 'IMAPA']].mean(axis=1)
# Create a Series for the ensemble mean
ensemble_series = pl.Series('Ensemble', ensemble)
# Add the ensemble mean as a new column to the DataFrame
crossvalidation_df = crossvalidation_df.with_columns(ensemble_series)
def wmape(y_true, y_pred):
return np.abs(y_true - y_pred).sum() / np.abs(y_true).sum()
# Calculate the WMAPE for the ensemble model
wmape_value = wmape(crossvalidation_df['y'], crossvalidation_df['Ensemble'])
print('Average WMAPE for Ensemble: ', round(wmape_value, 4))
# Calculate the errors for the ensemble model
errors = crossvalidation_df['y'] - crossvalidation_df['Ensemble']
# For an individual forecast
individual_accuracy = 1 - (abs(crossvalidation_df['y'] - crossvalidation_df['Ensemble']) / crossvalidation_df['y'])
individual_bias = (crossvalidation_df['Ensemble'] / crossvalidation_df['y']) - 1
# Add these calculations as new columns to DataFrame
crossvalidation_df = crossvalidation_df.with_columns([
individual_accuracy.alias("individual_accuracy"),
individual_bias.alias("individual_bias")
])
# Print the individual accuracy and bias for each week
for row in crossvalidation_df.to_dicts():
id = row['unique_id']
date = row['ds']
accuracy = row['individual_accuracy']
bias = row['individual_bias']
print(f"{id}, {date}, Individual Accuracy: {accuracy:.4f}, Individual Bias: {bias:.4f}")
# For groups of forecasts
group_accuracy = 1 - (errors.abs().sum() / crossvalidation_df['y'].sum())
group_bias = (crossvalidation_df['Ensemble'].sum() / crossvalidation_df['y'].sum()) - 1
# Print the average group accuracy and group bias over all folds for the ensemble model
print('Average Group Accuracy: ', round(group_accuracy, 4))
print('Average Group Bias: ', round(group_bias, 4))
# Fit the models on the entire dataset
sf.fit(y_cl4_over_9)
# Instantiate the ConformalIntervals class
prediction_intervals = ConformalIntervals()
# Generate 24 months forecasts
forecasts_df = sf.forecast(h=52*2, prediction_intervals=prediction_intervals, level=[95], id_col='unique_id', sort_df=True)
# Apply the non-negative constraint to the forecasts of individual models
forecasts_df = forecasts_df.with_columns([
pl.when(pl.col('AutoARIMA') < 0).then(0).otherwise(pl.col('AutoARIMA')).alias('AutoARIMA'),
pl.when(pl.col('AutoETS') < 0).then(0).otherwise(pl.col('AutoETS')).alias('AutoETS'),
pl.when(pl.col('DynamicOptimizedTheta') < 0).then(0).otherwise(pl.col('DynamicOptimizedTheta')).alias('DynamicOptimizedTheta'),
pl.when(pl.col('IMAPA') < 0).then(0).otherwise(pl.col('IMAPA')).alias('IMAPA'),
pl.when(pl.col('AutoARIMA-lo-95') < 0).then(0).otherwise(pl.col('AutoARIMA-lo-95')).alias('AutoARIMA-lo-95'),
pl.when(pl.col('AutoETS-lo-95') < 0).then(0).otherwise(pl.col('AutoETS-lo-95')).alias('AutoETS-lo-95'),
pl.when(pl.col('DynamicOptimizedTheta-lo-95') < 0).then(0).otherwise(pl.col('DynamicOptimizedTheta-lo-95')).alias('DynamicOptimizedTheta-lo-95'),
pl.when(pl.col('IMAPA-lo-95') < 0).then(0).otherwise(pl.col('IMAPA-lo-95')).alias('IMAPA-lo-95')
])
# Calculate the ensemble forecast
ensemble_forecast = forecasts_df[['AutoARIMA', 'AutoETS', 'DynamicOptimizedTheta', 'IMAPA']].mean(axis=1)
# Calculate the lower and upper prediction intervals for the ensemble forecast
ensemble_lo_95 = forecasts_df[['AutoARIMA-lo-95', 'AutoETS-lo-95', 'DynamicOptimizedTheta-lo-95', 'IMAPA-lo-95']].mean(axis=1)
ensemble_hi_95 = forecasts_df[['AutoARIMA-hi-95', 'AutoETS-hi-95', 'DynamicOptimizedTheta-hi-95', 'IMAPA-hi-95']].mean(axis=1)
# Create Series for the ensemble forecast and its prediction intervals
ensemble_forecast_series = pl.Series('EnsembleForecast', ensemble_forecast)
ensemble_lo_95_series = pl.Series('Ensemble-lo-95', ensemble_lo_95)
ensemble_hi_95_series = pl.Series('Ensemble-hi-95', ensemble_hi_95)
# Add the ensemble forecast and its prediction intervals as new columns to the DataFrame
forecasts_df = forecasts_df.with_columns([ensemble_forecast_series, ensemble_lo_95_series, ensemble_hi_95_series])
# Round the ensemble forecast and prediction intervals and convert to integer
forecasts_df = forecasts_df.with_columns([
pl.col("EnsembleForecast").round().cast(pl.Int32),
pl.col("Ensemble-lo-95").round().cast(pl.Int32),
pl.col("Ensemble-hi-95").round().cast(pl.Int32)
])
# Split the unique_id concat into the original columns
def split_unique_id(unique_id):
parts = unique_id.split('_')
return parts if len(parts) >= 4 else (parts + [None] * (4 - len(parts)))
forecasts_df = (
forecasts_df
.with_columns([
pl.col('unique_id').apply(lambda uid: split_unique_id(uid)[0]).alias('MaterialID'),
pl.col('unique_id').apply(lambda uid: split_unique_id(uid)[1]).alias('SalesOrg'),
pl.col('unique_id').apply(lambda uid: split_unique_id(uid)[2]).alias('DistrChan'),
pl.col('unique_id').apply(lambda uid: split_unique_id(uid)[3]).alias('CL4'),
])
.drop('unique_id')
)
# Rename ‘ds’ to ‘WeekDate’
forecasts_df = forecasts_df.rename({'ds': 'WeekDate'})
# Reorder the columns
forecasts_df = forecasts_df.select([
"MaterialID",
"SalesOrg",
"DistrChan",
"CL4",
"WeekDate",
"EnsembleForecast",
"Ensemble-lo-95",
"Ensemble-hi-95",
"AutoARIMA",
"AutoARIMA-lo-95",
"AutoARIMA-hi-95",
"AutoETS",
"AutoETS-lo-95",
"AutoETS-hi-95",
"DynamicOptimizedTheta",
"DynamicOptimizedTheta-lo-95",
"DynamicOptimizedTheta-hi-95",
"IMAPA",
"IMAPA-lo-95",
"IMAPA-hi-95"
])
# Create an empty list
forecasts_list = []
# Append each row to the list
for row in forecasts_df.to_dicts():
forecasts_list.append(row)
# Print the list
for forecast in forecasts_list:
print(forecast) {'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 6, 2, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 6, 9, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 6, 16, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 6, 23, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 6, 30, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 7, 7, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 7, 14, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 7, 21, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 7, 28, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 8, 4, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 8, 11, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 8, 18, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 8, 25, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 9, 1, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 9, 8, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 9, 15, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 9, 22, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 9, 29, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 10, 6, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 10, 13, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 10, 20, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 10, 27, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 11, 3, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 11, 10, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta-hi-95': 0.0, 'IMAPA': 0.0, 'IMAPA-lo-95': 0.0, 'IMAPA-hi-95': 0.0}
{'MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389', 'WeekDate': datetime.datetime(2025, 11, 17, 0, 0), 'EnsembleForecast': 0, 'Ensemble-lo-95': 0, 'Ensemble-hi-95': 0, 'AutoARIMA': 0.0, 'AutoARIMA-lo-95': 0.0, 'AutoARIMA-hi-95': 0.0, 'AutoETS': 0.0, 'AutoETS-lo-95': 0.0, 'AutoETS-hi-95': 0.0, 'DynamicOptimizedTheta': 0.0, 'DynamicOptimizedTheta-lo-95': 0.0, 'DynamicOptimizedTheta- why is MaterialID': '22027032', 'SalesOrg': 'US01', 'DistrChan': '8', 'CL4': '1108389' still showing up even though it was removed in the first block?
|
aa0cb9181956d7f9ed39a3776ef6f6ec
|
{
"intermediate": 0.3104482889175415,
"beginner": 0.3198468089103699,
"expert": 0.369704931974411
}
|
41,144
|
you need to modify the following code as i want readings of mq135 and mq7 in this format : ( 1.3, 2.0 ).
// Define the analog pin numbers for each sensor
#define MQ135_PIN A0
#define MQ7_PIN A1
void setup() {
// Initialize Serial communication
Serial.begin(9600);
}
void loop() {
// Read air quality data from MQ-135 sensor
int airQualityMQ135 = analogRead(MQ135_PIN);
float voltageMQ135 = airQualityMQ135 * (5.0 / 1023.0);
// Read carbon monoxide data from MQ-7 sensor
int carbonMonoxideMQ7 = analogRead(MQ7_PIN);
float voltageMQ7 = carbonMonoxideMQ7 * (5.0 / 1023.0);
// Print the readings
Serial.print("Air Quality (MQ-135): ");
Serial.println(voltageMQ135);
Serial.print("Carbon Monoxide (MQ-7): ");
Serial.println(voltageMQ7);
// Delay before next readings
delay(20000); // Adjust delay as needed
}
|
928e49b1bc632c7924ec015ee5afa016
|
{
"intermediate": 0.39193210005760193,
"beginner": 0.3940403461456299,
"expert": 0.214027538895607
}
|
41,145
|
# the first 3 tasks with use mtcars data. check out the data frame using:
head(mtcars) # Task 2: correlation matrix
# use the mtcars data for this task.
# show the correlation matrix plot for the variables cyl, hp, drat, qsec, vs.
# set significance level at 0.01, and leave non-significant coefficients blank
|
573668f4cf0876de82a99a2d64e9fbaa
|
{
"intermediate": 0.5125584006309509,
"beginner": 0.17468678951263428,
"expert": 0.3127548098564148
}
|
41,146
|
A firm must decide whether to increase the prices or not. The firm expects the same level of profits if the prices are maintained. Changes in the profits will occur when the prices are increased. Depending on the reaction of the competitors, quiet (50% of probabilities) or aggressive, the profits are going to increase by 40 billion /year or decrease by 20 billion year, respectively.
Assume that the firm is considering the creation of a department of Data Analytics and Competitive Intelligence. The function of the department is to provide information about the possible reaction of the competitors, aggressive, or quiet.
Develop the decision tree of the firm depending on the information provided by the Data Analytics and Competitive Intelligence Department.
Assume that the Data Analytics and Competitive Intelligence Department is dedicated full time to providing this information.
a)
Which is the maximum annual budget that the CEO can allocate to the Department (assuming that it provides perfect information)?
b)
What is the minimum quality (qM) of the information provided by the department that justifies its creation? Why? What are the prices decisions of the firms when the quality of the information is below or above qM?
The department is giving a 20% of false positives, when the Data Analytics and Competitive Intelligence Department says that the competitors’ reaction is quiet, the real reaction is aggressive.
c)
What is the quality of the information provided by the Data Analytics and Competitive Intelligence Department?
d)
What is the maximum annual budget of the Data Analytics and Competitive Intelligence Department that economically justifies such information quality?
e)
What is the probability that the firm changes prices and the reaction of the competitors is quiet?
The Data Analytics and Competitive Intelligence Department requires an increase of 1,3 billion $ in its annual budget to improve the quality of its information to 70%, q=0,7.
h) What is going to be the number of false positives and false negatives in the case that the investment is made?
f)
Does this proposal make economic sense? please answer only the last question
|
fecaab3221fc5617944936e3999bec2a
|
{
"intermediate": 0.25800174474716187,
"beginner": 0.3746642768383026,
"expert": 0.36733391880989075
}
|
41,147
|
huggingface.co
|
e036dbf564566012cc7e98617baad432
|
{
"intermediate": 0.33114102482795715,
"beginner": 0.3282666802406311,
"expert": 0.34059226512908936
}
|
41,148
|
how to use two configurations in flask-mail
|
39b88139275c4cee28b65f3b1cfa7c8a
|
{
"intermediate": 0.5327135920524597,
"beginner": 0.17520524561405182,
"expert": 0.29208117723464966
}
|
41,149
|
html page with one button. By pressing the button, it adds a new div with the buttons text and a qr-code. The new div will be 300px width and 150px height.
|
368fd2bc6bd2b0845412324c8981005a
|
{
"intermediate": 0.3827432692050934,
"beginner": 0.23731659352779388,
"expert": 0.3799401819705963
}
|
41,150
|
In this below code the statement "log_chosen_probs = tf.math.log(chosen_probs)" was assigned but never used with in the function "def learn(self, state, action_probs, reward, next_state, done)" in 'class PPOAgent'.
class PPOAgent:
def __init__(self, bounds_low, bounds_high, alpha=0.0003, gamma=0.99):
self.bounds_low = bounds_low
self.bounds_high = bounds_high
self.delta = (bounds_high - bounds_low) / 100.0 # Smallest unit to update the parameter
self.gamma = gamma
self.num_params = len(bounds_low)
self.actor_critic = ActorCritic(self.num_params)
self.optimizer = Adam(learning_rate=alpha)
def choose_action(self, input_observation):
state = tf.convert_to_tensor([input_observation], dtype=tf.float32)
logits, _ = self.actor_critic(state)
action_probs = tf.nn.softmax(logits, axis=-1)
return action_probs.numpy()[0]
def learn(self, state, action_probs, reward, next_state, done):
state = tf.convert_to_tensor([state], dtype=tf.float32)
next_state = tf.convert_to_tensor([next_state], dtype=tf.float32)
reward = tf.convert_to_tensor(reward, dtype=tf.float32)
with tf.GradientTape() as tape:
current_logits, current_value = self.actor_critic(state)
current_value = tf.squeeze(current_value)
_, next_value = self.actor_critic(next_state)
next_value = tf.squeeze(next_value)
advantage = reward + self.gamma * next_value * (1 - int(done)) - current_value
value_loss = advantage ** 2
# Get probs and actions_taken for each timestamp
probs = tf.nn.softmax(current_logits, -1)
actions_taken = tf.cast(tf.argmax(action_probs, axis=-1), dtype=tf.int32)
# Create a vector of cumulative dimensions for indices
gather_indices = tf.range(tf.shape(probs)[0]) * tf.shape(probs)[1] + actions_taken
# Gather probabilities for selected actions
chosen_probs = tf.gather(tf.reshape(probs, [-1]), gather_indices)
# Log probs for the entropy calculation
log_chosen_probs = tf.math.log(chosen_probs)
# Calculate entropy
entropy = -tf.reduce_sum(probs * tf.math.log(probs + 1e-9), axis=-1)
# Calculate log probability of the chosen action
log_probs = tf.math.log(chosen_probs)
# Calculate advantage and losses
advantage = reward + self.gamma * next_value * (1 - int(done)) - current_value
policy_loss = -log_probs * tf.stop_gradient(advantage)
value_loss = advantage ** 2
total_loss = policy_loss + 0.5 * value_loss - 0.01 * entropy
gradients = tape.gradient(total_loss, self.actor_critic.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.actor_critic.trainable_variables))
|
6fcfde72964a8cb563edb4a5fbf12e13
|
{
"intermediate": 0.3465691804885864,
"beginner": 0.4374981224536896,
"expert": 0.21593275666236877
}
|
41,151
|
html page with one button. By pressing the button, it adds a new div with the buttons text and a qr-code. The new div will be 300px width and 150px height.
|
1b5dfaad5b59bb4725c867c755f8f049
|
{
"intermediate": 0.3827432692050934,
"beginner": 0.23731659352779388,
"expert": 0.3799401819705963
}
|
41,152
|
HTML page with one button and an empty div
|
866aaa1a42853dc2ac6369dd4bcedc1e
|
{
"intermediate": 0.37241387367248535,
"beginner": 0.2993278503417969,
"expert": 0.3282582461833954
}
|
41,153
|
In this below code the statement "log_chosen_probs = tf.math.log(chosen_probs)" was assigned but never used with in the function "def learn(self, state, action_probs, reward, next_state, done)" in 'class PPOAgent', the said statement was calculated for the entropy calculation, but it was not included into the "entropy = -tf.reduce_sum(probs * tf.math.log(probs + 1e-9), axis=-1)". how to include the statement of calculation "log_chosen_probs" with in the logic appropriately fit in to the present logic.
class PPOAgent:
def __init__(self, bounds_low, bounds_high, alpha=0.0003, gamma=0.99):
self.bounds_low = bounds_low
self.bounds_high = bounds_high
self.delta = (bounds_high - bounds_low) / 100.0 # Smallest unit to update the parameter
self.gamma = gamma
self.num_params = len(bounds_low)
self.actor_critic = ActorCritic(self.num_params)
self.optimizer = Adam(learning_rate=alpha)
def choose_action(self, input_observation):
state = tf.convert_to_tensor([input_observation], dtype=tf.float32)
logits, _ = self.actor_critic(state)
action_probs = tf.nn.softmax(logits, axis=-1)
return action_probs.numpy()[0]
def learn(self, state, action_probs, reward, next_state, done):
state = tf.convert_to_tensor([state], dtype=tf.float32)
next_state = tf.convert_to_tensor([next_state], dtype=tf.float32)
reward = tf.convert_to_tensor(reward, dtype=tf.float32)
with tf.GradientTape() as tape:
current_logits, current_value = self.actor_critic(state)
current_value = tf.squeeze(current_value)
_, next_value = self.actor_critic(next_state)
next_value = tf.squeeze(next_value)
advantage = reward + self.gamma * next_value * (1 - int(done)) - current_value
value_loss = advantage ** 2
# Get probs and actions_taken for each timestamp
probs = tf.nn.softmax(current_logits, -1)
actions_taken = tf.cast(tf.argmax(action_probs, axis=-1), dtype=tf.int32)
# Create a vector of cumulative dimensions for indices
gather_indices = tf.range(tf.shape(probs)[0]) * tf.shape(probs)[1] + actions_taken
# Gather probabilities for selected actions
chosen_probs = tf.gather(tf.reshape(probs, [-1]), gather_indices)
# Log probs for the entropy calculation
log_chosen_probs = tf.math.log(chosen_probs)
# Calculate entropy
entropy = -tf.reduce_sum(probs * tf.math.log(probs + 1e-9), axis=-1)
# Calculate log probability of the chosen action
log_probs = tf.math.log(chosen_probs)
# Calculate advantage and losses
advantage = reward + self.gamma * next_value * (1 - int(done)) - current_value
policy_loss = -log_probs * tf.stop_gradient(advantage)
value_loss = advantage ** 2
total_loss = policy_loss + 0.5 * value_loss - 0.01 * entropy
gradients = tape.gradient(total_loss, self.actor_critic.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.actor_critic.trainable_variables))
|
a623dc46fff24b20cc8eb1df54a09723
|
{
"intermediate": 0.27614420652389526,
"beginner": 0.5045163631439209,
"expert": 0.21933941543102264
}
|
41,154
|
Writeu funciton on c# which converts uint to ushort array with check of little or big endian
|
379636c75d623cc54be0fc4ca20f88ef
|
{
"intermediate": 0.4891495108604431,
"beginner": 0.2523522973060608,
"expert": 0.2584981918334961
}
|
41,155
|
can i use chat gpt 4 here
|
c3fb73c8d048f0eb95856f698343001d
|
{
"intermediate": 0.33365190029144287,
"beginner": 0.2553946375846863,
"expert": 0.41095349192619324
}
|
41,156
|
Write funciton on c# which convert uint to ushort array with check of little or big endian
|
418eb1b86d6bae62a68d752e512698ab
|
{
"intermediate": 0.5037763118743896,
"beginner": 0.28875166177749634,
"expert": 0.2074720412492752
}
|
41,157
|
export default class extends mixins(ResizeMixin) {
@Prop({ default: 'chart' }) private className!: string
private initChart() {
this.chart = echarts.init(this.$el as HTMLDivElement)
this.chart.setOption({})
}
beforeDestroy() {
if (!this.chart) {
return
}
this.chart.dispose()
this.chart = null
}
)
как переписать на vue 3
|
51fdd0e1e2cf142117c87917292b1c20
|
{
"intermediate": 0.37909388542175293,
"beginner": 0.425575315952301,
"expert": 0.19533076882362366
}
|
41,158
|
// C++ code to print all possible
// subsequences for given array using
// recursion
#include <bits/stdc++.h>
using namespace std;
// Recursive function to print all
// possible subsequences for given array
void printSubsequences(int arr[], int index,
vector<int> &subarr,int n)
{
// Print the subsequence when reach
// the leaf of recursion tree
if (index == n)
{
for (auto it:subarr){
cout << it << " ";
}
if(subarr.size()==0)
cout<<"{}";
cout<<endl;
return;
}
else
{
//pick the current index into the subsequence.
subarr.push_back(arr[index]);
printSubsequences(arr, index + 1, subarr,n);
subarr.pop_back();
//not picking the element into the subsequence.
printSubsequences(arr, index + 1, subarr,n);
}
}
// Driver Code
int main()
{
int arr[]={1, 2, 3};
int n=sizeof(arr)/sizeof(arr[0]);
vector<int> vec;
printSubsequences(arr, 0, vec,n);
return 0;
}
// This code is contributed by
// vivekr4400
give code in c
|
c21ccb1e13dbd7009befa05a58b55b66
|
{
"intermediate": 0.34528419375419617,
"beginner": 0.3702465891838074,
"expert": 0.28446921706199646
}
|
41,159
|
<html>
<head>
<title>一分钟模式</title>
</head>
<body>
<script type="text/javascript">
var LeqT = new Array();
var noiseCounter = prompt("输入噪声点位数","");
noiseCounter = Number(noiseCounter) - 1;
var noiseNumber = prompt("输入第一个噪声序号,可以是任何数字,比如27,10等","");
noiseNumber = Number(noiseNumber) - 1;
var noiseDay = prompt("请输入噪声的测量日期,格式如2017-03-01","");
//获得日期
var noiseTime = prompt("请输入第一个噪声的测量时间,格式如09:24:33","");
var hourTime = noiseTime.substr(0,2);
var minTime = noiseTime.substr(3,2);
var secTime = noiseTime.substr(6,2);
hourTime = Number(hourTime);
minTime = Number(minTime);
secTime = Number(secTime);
var i;
for (i = 0; i <= noiseCounter; i++)
{
var inputcounter = i + 1;
LeqT[i] = prompt("请输入第 "+ inputcounter +" 个值:" + "比如56.3", "");
LeqT[i] = Number(LeqT[i]);
}
//将噪声数据保存在数组中且转为数字类型
for (noiseWrite=0;noiseWrite < LeqT.length; noiseWrite++)
{
noiseNumber = noiseNumber + 1;//变序号
var varliang = parseInt(3*Math.random());
minTime = minTime + 3 + varliang;
secTime = secTime + 11 + varliang;
var L5;
var L10;
var L50;
var L90;
var L95;
var Lmax;
var Lmin;
var SD;
var SEL;
var Alternum=Math.random();
Alternum = Alternum.toFixed(1);
var Alternum1 = Math.random();
Alternum1 = Alternum.toFixed(1);
var Alternum2 = Math.random();
Alternum2 = Alternum2.toFixed(1);
var useful = Math.random();
useful = useful*2;
useful = Math.ceil(useful);
//修约小数为一位数
L5 = Number(LeqT[noiseWrite]) + 1.8 + Number(useful)+Number(Alternum);
L10 = Number(LeqT[noiseWrite]) + 1.1 + Number(Alternum);
L50 = Number(LeqT[noiseWrite]) - 1.4 - Number(Alternum);
L90 = Number(L50) - 0.8 - Number(Alternum2);
L95 = Number(L90) - 0.7 - Number(Alternum);
L50 = L50.toFixed(1);
L5 = L5.toFixed(1);
L10 = L10.toFixed(1);
L90 = L90.toFixed(1);
L95 = L95.toFixed(1);
SD = Math.random();
SD = SD + 0.1 + (LeqT[noiseWrite] - L95)/2.6;
SD = SD.toFixed(1);
Lmax= Number(L5) + 3*Number(Alternum) + 2.3*Number(Alternum2) + 2.1;
Lmin= Number(L95) - 1.5*Number(Alternum2) - 0.4;
SEL = Number(Lmax) + 6 + 0.8*Number(Alternum2) + 0.3*Number(Alternum);
Lmax= Lmax.toFixed(1);
Lmin= Lmin.toFixed(1);
SEL = SEL.toFixed(1);
if (noiseNumber < 100 && noiseNumber >= 10)
{
document.write(" " + "No:00" + noiseNumber + " ");
}
else if (noiseNumber < 10)
{
document.write(" " + "No:000" + noiseNumber + " ");
}
else
{
document.write(" " + "No:0" + noiseNumber + " ");
}
document.write(" " + "Mode:stat." + "</br>");
if(hourTime >= 10)
{
if (minTime < 10)
{
if (secTime < 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + ":" + "0" + secTime + "</br>");
}
else if (secTime >= 10 && secTime <60)
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + ":" + secTime + "</br>");
}
else
{
var secwrite = secTime - 60;
if (secwrite < 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + "0" + secwrite + "</br>");
}
else
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + ":" + secwrite + "</br>");
}
}
}
else if (minTime >=10 && minTime < 60)
{
if (secTime < 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" +"0" + secTime + "</br>");
}
else if (secTime >= 10 && secTime <60)
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" + secTime + "</br>");
}
else
{
var secwrite = secTime - 60;
if (secwrite < 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" +"0" + secwrite + "</br>");
}
else
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" +secwrite + "</br>");
}
}
}
else
{
minTime = minTime - 60;
hourTime = hourTime + 1;
if (minTime < 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + ":" + "22" + "</br>");
}
else if (minTime >=10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" + "47" + "</br>");
}
}
}
if(hourTime < 10)
{
if (minTime < 10)
{
if (secTime < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + "0" + minTime + "0" + secTime + "</br>");
}
else if (secTime >= 10 && secTime <60)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + "0" + minTime + secTime + "</br>");
}
else
{
var secwrite = secTime - 60;
if (secwrite < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + "0" + minTime + "0" + secwrite + "</br>");
}
else
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + "0" + minTime + secwrite + "</br>");
}
}
}
else if (minTime >=10 && minTime < 60)
{
if (secTime < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + minTime + ":" +"0" + secTime + "</br>");
}
else if (secTime >= 10 && secTime <60)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + minTime + ":" + secTime + "</br>");
}
else
{
var secwrite = secTime - 60;
if (secwrite < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + minTime + ":" +"0" + secwrite + "</br>");
}
else
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + minTime + ":" +secwrite + "</br>");
}
}
}
else
{
minTime = minTime - 60;
hourTime = hourTime + 1;
if (minTime < 10)
{
if ( hourTime < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + "0" + minTime + ":" + "19" + "</br>");
}
else if (hourTime >= 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + "0" + minTime + ":" + "43" + "</br>");
}
}
else if (minTime >=10)
{
if ( hourTime < 10)
{
document.write(" " + noiseDay + " " + "0" + hourTime + ":" + minTime + ":" + "33" + "</br>");
}
else if (hourTime >= 10)
{
document.write(" " + noiseDay + " " + hourTime + ":" + minTime + ":" + "52" + "</br>");
}
}
}
}
document.write(" " + "Ts=00h01m00s" + " "+ "Weight:A F" + "</br>");
document.write(" " + "LeqT= " + LeqT[noiseWrite] + "dB" + " " +"L5" + " " + " = " + L5 + "dB" + "</br>");
document.write(" " + "L10 = " + L10 + "dB" + " " + "L50 = " + L50 + "dB" + "</br>");
document.write(" " + "L90 = " + L90 + "dB" + " " + "L95 = " + L95 +"dB" + "</br>");
document.write(" " + "Lmax= " + Lmax + "dB" + " " + "Lmin= " + Lmin + "dB" + "</br>");
document.write(" " + "SD = " + SD + "dB" + " " + "SEL = " + SEL + "dB" + "</br>" + "</br>" + "</br>");
}
</script>
</body>
</html>
LeqT填在B3格式框里。请用excel的函数公式来表示L5,L10,L50,L90,L95,SD,Lmin,Lmax,SEL,还要计算后的SEL值符合SEL=10*log(1200)+LeqT的。 Alternum, Alternum1, Alternum2随机生成的数不能为0
|
aed4d0a5334c4a31f1aabc601fa9e954
|
{
"intermediate": 0.23658744990825653,
"beginner": 0.5366391539573669,
"expert": 0.22677336633205414
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.