make the GUI in raylib

This commit is contained in:
William Bell
2025-12-10 20:36:05 +00:00
parent 2fe284a806
commit 4b07780adf
7 changed files with 552 additions and 201 deletions

471
app.py
View File

@@ -1,233 +1,308 @@
# Standard library
import io
import json
import math
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from ctypes import c_float
from urllib.parse import urlencode, urljoin

# Third-party
import ffmpeg
import numpy as np
import pyray as pr
import requests
import sounddevice as sd
from dotenv import load_dotenv
from jellyfin_apiclient_python import JellyfinClient

# Local
from player import FFQueuePlayer, build_jellyfin_audio_url, server, client
# NOTE(review): this span is a garbled diff render — lines of the removed
# inline FFQueuePlayer class are interleaved with the new GUI module code.
class FFQueuePlayer:
def __init__(self, samplerate=44100, channels=2):
self.samplerate = samplerate
self.channels = channels
# --- Configuration Constants ---
INITIAL_SCREEN_WIDTH = 800
INITIAL_SCREEN_HEIGHT = 600
TARGET_FPS = 60
# Active and preloaded ffmpeg decoder processes (spawned lazily).
self.proc = None
self.next_proc = None
# --- State Variables ---
# Module-level GUI state bag shared by the main loop below.
state = {
"screen_width": INITIAL_SCREEN_WIDTH,
"screen_height": INITIAL_SCREEN_HEIGHT,
"current_time": 120.0,
"total_time": 300.0,
"is_playing": True,
# 3D Camera State
"camera": None,
"render_texture": None,
# Assets
"album_texture": None,
"album_model": None
}
# Currently playing / preloaded track URL or file path.
self.current_file = None
self.next_file = None
# --- Utility Functions ---
self.next_preloaded = False
def format_time_mm_ss(seconds):
    """Convert a duration in seconds to an 'MM:SS' string (minutes may exceed 59)."""
    total = int(seconds)
    return f"{total // 60:02d}:{total % 60:02d}"
# NOTE(review): interleaved diff fragment — FFQueuePlayer.__init__ state and
# the GUI helper get_3d_render_area() are mixed together here.
self.closed=False
# --- Dynamic Layout Functions ---
self.playing = False
# position/duration are in seconds; duration defaults to 1.0 so the
# percentage math never divides by zero before the first probe.
self.position = 0.0
self.duration = 1.0
self.next_duration=1.0
def get_3d_render_area(screen_width, screen_height):
# Target a fixed 2:1 aspect ratio for the 3D cover-flow viewport.
ASPECT_WIDTH = 2.0
ASPECT_HEIGHT = 1.0
ASPECT_RATIO = ASPECT_WIDTH / ASPECT_HEIGHT
self.song = 0
max_available_width = screen_width * 0.7
max_available_height = screen_height * 0.5
self.song_queue = queue.Queue()
self.swap_pending = False
# Letterbox: fit the largest 2:1 rectangle into the available area.
if (max_available_width / max_available_height) > ASPECT_RATIO:
height = max_available_height
width = height * ASPECT_RATIO
else:
width = max_available_width
height = width / ASPECT_RATIO
self.lock = threading.Lock()
# 16-bit PCM output; _callback pulls decoded audio from the ffmpeg pipe.
self.stream = sd.RawOutputStream(
samplerate=self.samplerate,
channels=self.channels,
dtype="int16",
callback=self._callback
)
self.stream.start()
def _open_ffmpeg(self, url, seek=0):
    """Spawn an ffmpeg process decoding *url* to raw s16le PCM on stdout.

    Args:
        url: Stream URL or local file path to decode.
        seek: Start offset in seconds (passed to ffmpeg's -ss).

    Returns:
        subprocess.Popen whose .stdout is the interleaved PCM pipe.
    """
    self.song += 1  # per-spawn counter; also names the per-decoder log file
    # Verbose ffmpeg logging goes to "<n>.txt" for debugging.
    log_file = open(str(self.song) + ".txt", "wb")
    try:
        return subprocess.Popen(
            [
                "ffmpeg",
                "-ss", str(seek),
                "-i", url,
                "-f", "s16le",
                "-ac", str(self.channels),
                "-ar", str(self.samplerate),
                "-loglevel", "verbose",
                "-"
            ],
            stdout=subprocess.PIPE,
            stderr=log_file,
        )
    finally:
        # Fix: close the parent's copy of the log handle. Popen duplicates
        # the fd into the child, so the child keeps writing; the original
        # code leaked one open file object per spawned decoder.
        log_file.close()
# NOTE(review): tail of get_3d_render_area() — centred horizontally, 15% down.
x = (screen_width - width) / 2
y = screen_height * 0.15
def seek(self, pos):
    """Restart decoding of the current track at *pos* seconds.

    Args:
        pos: Target position in seconds; clamped to 0 because the GUI's
             LEFT-arrow handler can request a negative offset.
    """
    with self.lock:
        pos = max(0.0, pos)
        old_proc = self.proc
        self.proc = self._open_ffmpeg(self.current_file, pos)
        # Fix: the previous decoder process was leaked (left running and
        # writing into its pipe) — terminate it once the replacement exists.
        if old_proc is not None:
            old_proc.kill()
        self.position = pos
# NOTE(review): return of get_3d_render_area() (diff-interleaved).
return pr.Rectangle(x, y, width, height)
def get_progress_bar_rect(screen_width, screen_height):
    """Compute the screen-space rectangle of the playback progress bar.

    The bar spans 70% of the window width, centred horizontally, sits at
    75% of the window height, and is 3% of the window tall.
    """
    bar_w = screen_width * 0.7
    bar_h = screen_height * 0.03
    bar_x = (screen_width - bar_w) / 2
    bar_y = screen_height * 0.75
    return pr.Rectangle(bar_x, bar_y, bar_w, bar_h)
def draw_progress_bar(rect, current_time, total_time):
# NOTE(review): fragment — the time-label half of this function appears
# further down in this garbled diff.
# Guard against zero/negative total so the ratio stays defined.
if total_time > 0:
progress_ratio = current_time / total_time
else:
progress_ratio = 0.0
# Track background, filled portion, then outline.
pr.draw_rectangle_rec(rect, pr.Color(100, 100, 100, 255))
progress_width = rect.width * progress_ratio
pr.draw_rectangle(int(rect.x), int(rect.y), int(progress_width), int(rect.height), pr.Color(200, 50, 50, 255))
pr.draw_rectangle_lines_ex(rect, 2, pr.Color(50, 50, 50, 255))
def close(self):
    """Stop playback and release the audio stream and decoder processes."""
    self.closed = True
    # Fix: the ffmpeg decoders were left running after close(), leaking
    # two child processes; kill both the active and preloaded pipelines.
    for proc in (self.proc, self.next_proc):
        if proc is not None:
            proc.kill()
    self.stream.close()
def get_duration(self, url):
"""Return duration in seconds for the track"""
# NOTE(review): fragment — the JSON-parsing tail of this method is
# interleaved further down in this garbled diff.
try:
# ffprobe emits stream/format metadata as JSON on stdout.
result = subprocess.run(
[
"ffprobe",
"-v", "quiet",
"-print_format", "json",
"-show_format",
"-show_streams",
url
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
# NOTE(review): tail of draw_progress_bar() — centred "MM:SS / MM:SS" label.
time_text = f"{format_time_mm_ss(current_time)} / {format_time_mm_ss(total_time)}"
text_width = pr.measure_text(time_text, int(rect.height * 0.7))
pr.draw_text(time_text,
int(rect.x + rect.width / 2 - text_width / 2),
int(rect.y + rect.height * 0.15),
int(rect.height * 0.7),
pr.WHITE)
# NOTE(review): get_duration() tail interleaved with load_album_assets() head.
info = json.loads(result.stdout)
# --- ASSET MANAGEMENT ---
# Prefer stream duration → fallback to format duration
if "streams" in info:
for s in info["streams"]:
if "duration" in s:
return float(s["duration"])
def load_album_assets():
"""Loads the texture, creates the 3D model, and applies the flipped texture."""
# 1. Load Image
try:
image = pr.load_image("music/albumcover.png")
# TODO(review): bare except hides real errors (and pyray may not raise at
# all on a missing file) — narrow this to the actual failure mode.
except:
print("WARNING: 'music/albumcover.png' not found. Using placeholder.")
image = pr.gen_image_checked(512, 512, 32, 32, pr.DARKGRAY, pr.WHITE)
if "format" in info and "duration" in info["format"]:
return float(info["format"]["duration"])
except Exception as e:
print("ffprobe error:", e)
# None is returned on probe failure — callers must handle it.
return None
def add_to_queue(self, url):
    """Append a track URL or file path to the playback queue without blocking."""
    self.song_queue.put(url, block=False)
def play(self):
    """Resume playback; a no-op when already playing."""
    with self.lock:
        if self.playing:
            return
        self.playing = True
def pause(self):
    """Suspend playback until play() is called again."""
    with self.lock:
        self.playing = False
def _start_next(self):
# NOTE(review): fragment — the promotion of next_proc to proc appears
# further down in this garbled diff.
# Kill old pipeline
if self.proc:
self.proc.kill()
# NOTE(review): middle of load_album_assets() (diff-interleaved).
# --- THE FIX: FLIP THE IMAGE VERTICALLY ---
pr.image_flip_vertical(image)
# 2. Create Texture
texture = pr.load_texture_from_image(image)
pr.unload_image(image)
# 3. Generate Mesh (CD Case)
mesh = pr.gen_mesh_cube(1.5, 1.5, 0.0)
# 4. Load Model
model = pr.load_model_from_mesh(mesh)
# 5. Apply Texture
# We use index 0 for the Albedo/Diffuse map
map_index = 0
# Use MATERIAL_MAP_ALBEDO if the binding is modern enough
if hasattr(pr.MaterialMapIndex, 'MATERIAL_MAP_ALBEDO'):
map_index = pr.MaterialMapIndex.MATERIAL_MAP_ALBEDO
# NOTE(review): tail of _start_next() — promote the preloaded pipeline to
# active and reset the preload flag.
# Move next pipeline into active
self.position = 0.0
self.proc = self.next_proc
self.current_file = self.next_file
self.duration = self.next_duration
self.next_proc=None
self.next_preloaded = False
# NOTE(review): stray load_album_assets() line (diff-interleaved).
model.materials[0].maps[map_index].texture = texture
def preload_next(self):
    """Pull the next track off the queue and spawn its decoder.

    Blocks until a track is available (queue.Queue.get with no timeout);
    preload_next_threaded() runs this off the audio thread for that reason.
    """
    self.next_file = self.song_queue.get()
    # Fix: get_duration() returns None on ffprobe failure, which would later
    # crash the "position >= duration - 10" comparison in the audio
    # callback; fall back to the constructor's 1.0 default instead.
    self.next_duration = self.get_duration(self.next_file) or 1.0
    self.next_proc = self._open_ffmpeg(self.next_file)
    self.next_preloaded = True
# NOTE(review): return of load_album_assets() (diff-interleaved).
return texture, model
def preload_next_threaded(self):
    """Kick off preload_next() on a background thread, at most once per track."""
    if self.next_preloaded:
        return
    # Set the flag before spawning so repeated calls from the audio
    # callback cannot start duplicate preloads.
    self.next_preloaded = True
    # Fix: daemon=True so a preload blocked on an empty queue cannot keep
    # the interpreter alive at exit.
    threading.Thread(target=self.preload_next, daemon=True).start()
# --- CORE 3D RENDERING ---
def _callback(self, outdata, frames, t, status):
"""sounddevice output callback: fill *outdata* with PCM read from ffmpeg.

Runs on the audio thread under self.lock.
"""
with self.lock:
# Bytes needed this period: frames * channels * 2 (s16 samples).
needed = frames * self.channels * 2
data = b''
if self.proc is None:
# First invocation: no pipeline yet — pull the first track
# synchronously. NOTE(review): blocks the audio thread if the
# queue is empty; confirm this is acceptable at startup.
self.preload_next()
self._start_next()
else:
data = self.proc.stdout.read(needed) or b''
# Advance position by bytes consumed / bytes-per-second.
self.position += len(data) / (self.samplerate * self.channels * 2)
# Begin preloading the next track 10 s before the current one ends.
if self.position >= self.duration-10:
self.preload_next_threaded()
# Decoder exited and this period came up short → track finished;
# swap in the preloaded pipeline and top up the buffer from it.
if self.proc.poll() is not None and len(data)<needed:
self._start_next()
new_data = self.proc.stdout.read(needed-len(data)) or b''
self.position += len(new_data) / (self.samplerate * self.channels * 2)
data += new_data
# NOTE(review): self.playing is never consulted here, so pause() appears
# to have no effect on output — verify against the full (non-diff) file.
# Pad any shortfall with silence so the device always gets a full buffer.
outdata[:len(data)]=data
outdata[len(data):] = b"\x00" * (needed-len(data))
def setup_3d_environment(render_width, render_height):
    """Create the perspective camera for the cover-flow scene.

    Note: render_width/render_height are currently unused; they are kept
    so the call sites remain unchanged.
    """
    cam = pr.Camera3D()
    cam.position = pr.Vector3(0.0, 0.0, 4.0)  # pulled back so all covers fit in frame
    cam.target = pr.Vector3(0.0, 0.0, 0.0)
    cam.up = pr.Vector3(0.0, 1.0, 0.0)
    cam.fovy = 45.0
    cam.projection = pr.CameraProjection.CAMERA_PERSPECTIVE
    return cam
def build_jellyfin_audio_url(
base_url: str,
item_id: str,
api_key: str,
user_id: str,
container: str = "flac",
audio_codec: str = "flac",
bitrate: int | None = None,
media_source_id: str | None = None,
) -> str:
# NOTE(review): from here on, the body of build_jellyfin_audio_url() is
# interleaved with the new draw_3d_cover_flow() function (garbled diff);
# the two docstrings below belong to the two different functions.
def draw_3d_cover_flow(camera, model):
"""
Build a Jellyfin audio stream URL using urllib.parse.
Draws the textured model using the existing Matrix logic.
"""
path = f"/Items/{item_id}/Download"
# NOTE(review): draw_3d_cover_flow() body interleaved with the query-building
# tail of build_jellyfin_audio_url() (garbled diff).
pr.begin_mode_3d(camera)
# We use pr.WHITE as the tint so the texture shows its original colors.
# If you use pr.RED, the album cover will look red-tinted.
# --------------------------------------------------------
# 2. CURRENT ALBUM (Center)
# --------------------------------------------------------
# Draw model at (0,0,0) with 1.0 scale
pr.rl_push_matrix()
pr.rl_translatef(0.0, 0.0, 1.5) # Spaced out slightly more
pr.rl_rotatef(0.0, 0.0, 1.0, 0.0) # Sharper angle
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.WHITE)
pr.rl_pop_matrix()
# NOTE(review): query parameters of build_jellyfin_audio_url (interleaved).
params = {
"UserId": user_id,
"Container": container,
"AudioCodec": audio_codec, # <-- IMPORTANT
"api_key": api_key,
}
# --------------------------------------------------------
# 3. PREVIOUS ALBUM (Far Far Left)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(-3.5, 0.0, 0.0) # Spaced out slightly more
pr.rl_rotatef(90.0, 0.0, 1.0, 0.0) # Sharper angle
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.LIGHTGRAY) # Slightly darkened
pr.rl_pop_matrix()
if bitrate is not None:
params["Bitrate"] = bitrate
# --------------------------------------------------------
# 3. PREVIOUS ALBUM (Far Left)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(-2.5, 0.0, 0.0) # Spaced out slightly more
pr.rl_rotatef(90.0, 0.0, 1.0, 0.0) # Sharper angle
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.LIGHTGRAY) # Slightly darkened
pr.rl_pop_matrix()
if media_source_id is not None:
params["MediaSourceId"] = media_source_id
# --------------------------------------------------------
# 3. PREVIOUS ALBUM (Near Left)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(-1.5, 0.0, 0.5) # Added slight Z offset for depth
pr.rl_rotatef(65.0, 0.0, 1.0, 0.0)
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.WHITE)
pr.rl_pop_matrix()
# --------------------------------------------------------
# 4. NEXT ALBUM (Near Right)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(1.5, 0.0, 0.5)
pr.rl_rotatef(-65.0, 0.0, 1.0, 0.0)
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.WHITE)
pr.rl_pop_matrix()
# NOTE(review): final URL assembly of build_jellyfin_audio_url (interleaved).
query = urlencode(params)
return urljoin(base_url, path) + "?" + query
# --------------------------------------------------------
# 4. NEXT ALBUM (Far Right)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(2.5, 0.0, 0.0)
pr.rl_rotatef(-90.0, 0.0, 1.0, 0.0)
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.LIGHTGRAY)
pr.rl_pop_matrix()
# --------------------------------------------------------
# 4. NEXT ALBUM (Far Far Right)
# --------------------------------------------------------
pr.rl_push_matrix()
pr.rl_translatef(3.5, 0.0, 0.0)
pr.rl_rotatef(-90.0, 0.0, 1.0, 0.0)
pr.draw_model(model, pr.Vector3(0.0, 0.0, 0.0), 1.0, pr.LIGHTGRAY)
pr.rl_pop_matrix()
pr.end_mode_3d()
# --- Main Setup and Loop ---
# Connect to the Jellyfin server using credentials from the .env file.
client = JellyfinClient()
load_dotenv()
# App identity reported to the Jellyfin server.
client.config.app('FinPod', '0.0.1', 'FinPod prototype', 'FinPod_prototype_1')
client.config.data["auth.ssl"] = True
client.auth.connect_to_address(os.getenv("host"))
# NOTE(review): env key naming looks inconsistent — connect uses "host" while
# login is passed "URL" as its first argument; confirm the .env schema.
client.auth.login(os.getenv("URL"), os.getenv("username"), os.getenv("password"))
credentials = client.auth.credentials.get_credentials()
server = credentials["Servers"][0]
# TODO(review): this prints the server record (including the access token)
# to stdout — remove or redact before shipping.
print(json.dumps(server))
# Initialization
pr.set_config_flags(pr.ConfigFlags.FLAG_WINDOW_RESIZABLE)
pr.set_config_flags(pr.FLAG_MSAA_4X_HINT)
pr.init_window(state["screen_width"], state["screen_height"], "UgPod")
pr.set_target_fps(TARGET_FPS)
# Create the player and enqueue tracks. The first six adds use Jellyfin
# stream URLs; the later adds use local files (the commented-out tails show
# the Jellyfin variants they replaced).
player = FFQueuePlayer()
# Build Jellyfin URLs
# Add to queue
print("add queue")
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("99067e877d91be1a66eb5a7ff2f4128f")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("916eda422f48efd8705f29e0600a3e60")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("5e1067d59ed98979ad12a58548b27b83")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("8bcf8240d12aa5c3b14dc3b57f32fef7")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("38a6c21561f54d284a6acad89a3ea8b0")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("631aeddb0557fef65f49463abb20ad7f")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue('music/pink floyd/dark side of the moon/01 Speak to Me.flac')#(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("99067e877d91be1a66eb5a7ff2f4128f")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue('music/pink floyd/dark side of the moon/02 Breathe (In the Air).flac')#(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("916eda422f48efd8705f29e0600a3e60")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue('music/pink floyd/dark side of the moon/03 On the Run.flac')#(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("5e1067d59ed98979ad12a58548b27b83")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue('music/pink floyd/dark side of the moon/04 Time.flac')#(build_jellyfin_audio_url(server["address"], client.jellyfin.get_item("8bcf8240d12aa5c3b14dc3b57f32fef7")["Id"], server["AccessToken"], server["UserId"]))
player.add_to_queue('music/pink floyd/dark side of the moon/05 The Great Gig in the Sky.flac')
player.add_to_queue('music/pink floyd/dark side of the moon/06 Money.flac')
player.add_to_queue('music/pink floyd/dark side of the moon/07 Us and Them.flac')
player.add_to_queue('music/pink floyd/dark side of the moon/08 Any Colour You Like.flac')
player.add_to_queue('music/pink floyd/dark side of the moon/09 Brain Damage.flac')
player.add_to_queue('music/pink floyd/dark side of the moon/10 Eclipse.flac')
print("add queue done")
player.play()
# NOTE(review): leftover headless test loop from the pre-GUI version — it
# never exits, which would make the raylib loop below unreachable; confirm
# against the real (non-diff-garbled) file.
while True:
print("pos:", str(round((player.position*100)/player.duration))+"%", player.position, '/', player.duration)
time.sleep(1)
# Initial setup
# Create the offscreen render target and camera sized to the 3D viewport.
render_rect = get_3d_render_area(state["screen_width"], state["screen_height"])
state["render_texture"] = pr.load_render_texture(int(render_rect.width), int(render_rect.height))
state["camera"] = setup_3d_environment(int(render_rect.width), int(render_rect.height))
# NOTE(review): player.close() here, before the GUI loop even starts, looks
# like stray residue of the removed headless script — verify.
player.close()
# LOAD THE ASSETS
state["album_texture"], state["album_model"] = load_album_assets()
# --- Main Game Loop ---
while not pr.window_should_close():
# 1. Update
current_width = pr.get_screen_width()
current_height = pr.get_screen_height()
if pr.is_window_resized():
state["screen_width"] = current_width
state["screen_height"] = current_height
# Recreate the offscreen target to match the new viewport size.
render_rect = get_3d_render_area(current_width, current_height)
pr.unload_render_texture(state["render_texture"])
state["render_texture"] = pr.load_render_texture(int(render_rect.width), int(render_rect.height))
delta_time = pr.get_frame_time()
# Input: SPACE toggles play/pause; LEFT/RIGHT seek by 5 seconds.
if pr.is_key_pressed(pr.KeyboardKey.KEY_SPACE):
if player.playing:
player.pause()
else:
player.play()
if pr.is_key_pressed(pr.KeyboardKey.KEY_LEFT):
player.seek(player.position-5)
if pr.is_key_pressed(pr.KeyboardKey.KEY_RIGHT):
player.seek(player.position+5)
# ----------------------------------------------------
# 2. DRAW 3D SCENE
# ----------------------------------------------------
render_rect = get_3d_render_area(current_width, current_height)
pr.begin_texture_mode(state["render_texture"])
pr.clear_background(pr.Color(20, 20, 20, 255))
# Pass the loaded model to the draw function
draw_3d_cover_flow(state["camera"], state["album_model"])
pr.end_texture_mode()
# ----------------------------------------------------
# 3. DRAW 2D GUI
# ----------------------------------------------------
pr.begin_drawing()
pr.clear_background(pr.Color(40, 40, 40, 255))
progress_rect = get_progress_bar_rect(current_width, current_height)
title_size = int(current_height * 0.05)
pr.draw_text("UgPod", int(current_width * 0.05), int(current_height * 0.05), title_size, pr.SKYBLUE)
# Negative source height flips the render texture vertically (raylib render
# textures are stored upside-down relative to screen space).
source_rect = pr.Rectangle(0, 0, state["render_texture"].texture.width, -state["render_texture"].texture.height)
pr.draw_texture_pro(state["render_texture"].texture,
source_rect, render_rect, pr.Vector2(0, 0), 0.0, pr.WHITE)
pr.draw_rectangle_lines_ex(render_rect, 3, pr.LIME)
# TODO(review): FFQueuePlayer defines no playback_info_to_duration() or
# playback_info in the visible code — this would raise AttributeError;
# player.duration is presumably what was meant. Verify.
draw_progress_bar(progress_rect, player.position, player.playback_info_to_duration(player.playback_info))
pr.draw_text(f"Status: {'Playing' if player.playing else 'Paused'} (SPACE)",
int(current_width * 0.05), int(current_height * 0.9), int(current_height * 0.03), pr.LIME)
pr.end_drawing()
# --- De-initialization ---
pr.unload_texture(state["album_texture"]) # Unload the texture
pr.unload_model(state["album_model"]) # Unload the model/mesh
pr.unload_render_texture(state["render_texture"])
pr.close_window()