1use std::{collections::VecDeque, env};
2use std::time::Instant;
3use std::sync::Arc;
4
5use anyhow::{Context, Result};
6use bytemuck::{Pod, Zeroable};
7use wgpu::util::DeviceExt;
8use winit::{
9 dpi::PhysicalSize,
10 event::{Event, WindowEvent},
11 event_loop::EventLoop,
12 window::WindowBuilder,
13};
14
15use video_sys::VideoStream;
16
/// One quad vertex: clip-space position plus texture coordinate.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` make it safe to cast a `[Vertex]` slice
/// to raw bytes for the vertex buffer upload.
#[repr(C)]
#[derive(Clone, Copy, Debug, Pod, Zeroable)]
struct Vertex {
    pos: [f32; 2], // clip-space x, y (shader location 0)
    uv: [f32; 2],  // texture u, v in 0..1 (shader location 1)
}
23
/// Attribute layout referenced by `Vertex::desc`:
/// location 0 = `pos`, location 1 = `uv`, both `Float32x2`.
static VERTEX_ATTRS: [wgpu::VertexAttribute; 2] =
    wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2];
26
27impl Vertex {
28 fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
29 wgpu::VertexBufferLayout {
30 array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
31 step_mode: wgpu::VertexStepMode::Vertex,
32 attributes: &VERTEX_ATTRS,
33 }
34 }
35}
36
/// All GPU resources and playback state for the player window.
struct State {
    // Core wgpu handles; the surface is reconfigured on resize.
    surface: wgpu::Surface<'static>,
    device: wgpu::Device,
    queue: wgpu::Queue,
    config: wgpu::SurfaceConfiguration,
    size: PhysicalSize<u32>,

    // Fullscreen-quad pipeline and its geometry buffers.
    pipeline: wgpu::RenderPipeline,
    vbuf: wgpu::Buffer,
    ibuf: wgpu::Buffer,
    icount: u32,

    // Destination texture the decoded RGBA frames are uploaded into,
    // plus the view/sampler/bind group that expose it to the shader.
    texture: wgpu::Texture,
    texture_view: wgpu::TextureView,
    sampler: wgpu::Sampler,
    bind_group: wgpu::BindGroup,

    // Decoder handle; frames are pulled via `try_recv_one` in `update`.
    video: VideoStream,

    // Frames received from the decoder but not yet due for presentation.
    stash: std::collections::VecDeque<video_sys::VideoFrame>,
    // Wall-clock anchor, set when the first frame arrives.
    started_at: Option<std::time::Instant>,
    // PTS of the first frame; the playback clock is relative to this.
    base_pts_us: Option<i64>,
    // PTS of the most recently uploaded frame; -1 before any frame.
    last_presented_pts: i64,

    // NOTE(review): the counters below are never written or read anywhere
    // in this file — presumably intended for render/video FPS and PTS-delta
    // statistics; confirm the intent or remove them.
    render_fps_t0: Instant,
    render_fps_n: u32,

    video_fps_t0: Instant,
    video_fps_n: u32,
    video_delta_sum_us: i64,
    video_delta_n: u32,

}
70
71impl State {
72 async fn new(window: Arc<winit::window::Window>, path: &str) -> Result<Self> {
73 let size = window.inner_size();
74
75 let instance = wgpu::Instance::default();
76 let surface = instance.create_surface(window.clone()).context("create_surface")?;
77
78 let adapter = instance
79 .request_adapter(&wgpu::RequestAdapterOptions {
80 power_preference: wgpu::PowerPreference::HighPerformance,
81 compatible_surface: Some(&surface),
82 force_fallback_adapter: false,
83 })
84 .await
85 .context("request_adapter")?;
86
87 let (device, queue) = adapter
88 .request_device(
89 &wgpu::DeviceDescriptor {
90 label: None,
91 required_features: wgpu::Features::empty(),
92 required_limits: wgpu::Limits::default(),
93 },
94 None,
95 )
96 .await
97 .context("request_device")?;
98
99 let caps = surface.get_capabilities(&adapter);
100 let format = caps.formats[0];
101
102 let config = wgpu::SurfaceConfiguration {
103 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
104 format,
105 width: size.width.max(1),
106 height: size.height.max(1),
107 present_mode: wgpu::PresentMode::Fifo,
108 alpha_mode: caps.alpha_modes[0],
109 view_formats: vec![],
110 desired_maximum_frame_latency: 2,
111 };
112 surface.configure(&device, &config);
113
114 let video = VideoStream::open(path).context("VideoStream::open")?;
116 let tex_w = video.width();
117 let tex_h = video.height();
118
119 let texture = device.create_texture(&wgpu::TextureDescriptor {
121 label: Some("video_texture"),
122 size: wgpu::Extent3d {
123 width: tex_w,
124 height: tex_h,
125 depth_or_array_layers: 1,
126 },
127 mip_level_count: 1,
128 sample_count: 1,
129 dimension: wgpu::TextureDimension::D2,
130 format: wgpu::TextureFormat::Rgba8UnormSrgb,
131 usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
132 view_formats: &[],
133 });
134 let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
135 let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
136 mag_filter: wgpu::FilterMode::Linear,
137 min_filter: wgpu::FilterMode::Linear,
138 ..Default::default()
139 });
140
141 let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
143 label: Some("shader"),
144 source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
145 });
146
147 let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
148 label: Some("bgl"),
149 entries: &[
150 wgpu::BindGroupLayoutEntry {
151 binding: 0,
152 visibility: wgpu::ShaderStages::FRAGMENT,
153 ty: wgpu::BindingType::Texture {
154 multisampled: false,
155 view_dimension: wgpu::TextureViewDimension::D2,
156 sample_type: wgpu::TextureSampleType::Float { filterable: true },
157 },
158 count: None,
159 },
160 wgpu::BindGroupLayoutEntry {
161 binding: 1,
162 visibility: wgpu::ShaderStages::FRAGMENT,
163 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
164 count: None,
165 },
166 ],
167 });
168
169 let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
170 label: Some("bg"),
171 layout: &bind_group_layout,
172 entries: &[
173 wgpu::BindGroupEntry {
174 binding: 0,
175 resource: wgpu::BindingResource::TextureView(&texture_view),
176 },
177 wgpu::BindGroupEntry {
178 binding: 1,
179 resource: wgpu::BindingResource::Sampler(&sampler),
180 },
181 ],
182 });
183
184 let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
185 label: Some("pl"),
186 bind_group_layouts: &[&bind_group_layout],
187 push_constant_ranges: &[],
188 });
189
190 let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
191 label: Some("pipeline"),
192 layout: Some(&pipeline_layout),
193 vertex: wgpu::VertexState {
194 module: &shader,
195 entry_point: "vs_main",
196 buffers: &[Vertex::desc()],
197 },
198 fragment: Some(wgpu::FragmentState {
199 module: &shader,
200 entry_point: "fs_main",
201 targets: &[Some(wgpu::ColorTargetState {
202 format: config.format,
203 blend: Some(wgpu::BlendState::ALPHA_BLENDING),
204 write_mask: wgpu::ColorWrites::ALL,
205 })],
206 }),
207 primitive: wgpu::PrimitiveState {
208 topology: wgpu::PrimitiveTopology::TriangleList,
209 ..Default::default()
210 },
211 depth_stencil: None,
212 multisample: wgpu::MultisampleState::default(),
213 multiview: None,
214 });
215
216 let vertices = [
217 Vertex { pos: [-1.0, -1.0], uv: [0.0, 1.0] },
218 Vertex { pos: [ 1.0, -1.0], uv: [1.0, 1.0] },
219 Vertex { pos: [ 1.0, 1.0], uv: [1.0, 0.0] },
220 Vertex { pos: [-1.0, 1.0], uv: [0.0, 0.0] },
221 ];
222 let indices: [u16; 6] = [0, 1, 2, 0, 2, 3];
223
224 let vbuf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
225 label: Some("vbuf"),
226 contents: bytemuck::cast_slice(&vertices),
227 usage: wgpu::BufferUsages::VERTEX,
228 });
229 let ibuf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
230 label: Some("ibuf"),
231 contents: bytemuck::cast_slice(&indices),
232 usage: wgpu::BufferUsages::INDEX,
233 });
234
235 Ok(Self {
236 surface,
237 device,
238 queue,
239 config,
240 size,
241 pipeline,
242 vbuf,
243 ibuf,
244 icount: indices.len() as u32,
245 texture,
246 texture_view,
247 sampler,
248 bind_group,
249 video,
250 stash: VecDeque::new(),
251 started_at: None,
252 base_pts_us: None,
253 last_presented_pts: -1,
254 render_fps_t0: Instant::now(),
255 render_fps_n: 0,
256 video_fps_t0: Instant::now(),
257 video_fps_n: 0,
258 video_delta_sum_us: 0,
259 video_delta_n: 0,
260 })
261 }
262
263 fn resize(&mut self, new_size: PhysicalSize<u32>) {
264 if new_size.width == 0 || new_size.height == 0 {
265 return;
266 }
267 self.size = new_size;
268 self.config.width = new_size.width;
269 self.config.height = new_size.height;
270 self.surface.configure(&self.device, &self.config);
271 }
272
273 fn update(&mut self) -> Result<()> {
274 while let Some(f) = self.video.try_recv_one() {
276 self.stash.push_back(f);
277 }
278
279 if self.started_at.is_none() {
281 if let Some(front) = self.stash.front() {
282 self.started_at = Some(std::time::Instant::now());
283 self.base_pts_us = Some(front.pts_us);
284 } else {
285 return Ok(());
286 }
287 }
288
289 let started_at = self.started_at.unwrap();
290 let elapsed_us = started_at.elapsed().as_micros() as i64;
291 let base = self.base_pts_us.unwrap();
292 let target_pts_us = base + elapsed_us;
293
294 let mut latest_due = None;
296 while let Some(front) = self.stash.front() {
297 if front.pts_us <= target_pts_us {
298 latest_due = self.stash.pop_front();
299 } else {
300 break;
301 }
302 }
303
304 if let Some(frame) = latest_due {
305 if frame.pts_us != self.last_presented_pts {
306 let dt = frame.pts_us - self.last_presented_pts;
307 log::info!("present dt={}us", dt);
308 self.last_presented_pts = frame.pts_us;
309 self.upload_frame(&frame.data, frame.width, frame.height);
310 }
311 }
312
313 Ok(())
314 }
315
316
317 fn upload_frame(&mut self, rgba: &[u8], width: u32, height: u32) {
318 let bytes_per_pixel = 4usize;
320 let row_bytes = width as usize * bytes_per_pixel;
321 let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
322 let padded_row_bytes = (row_bytes + align - 1) / align * align;
323
324 if padded_row_bytes == row_bytes {
325 self.queue.write_texture(
326 wgpu::ImageCopyTexture {
327 texture: &self.texture,
328 mip_level: 0,
329 origin: wgpu::Origin3d::ZERO,
330 aspect: wgpu::TextureAspect::All,
331 },
332 rgba,
333 wgpu::ImageDataLayout {
334 offset: 0,
335 bytes_per_row: Some(row_bytes as u32),
336 rows_per_image: Some(height),
337 },
338 wgpu::Extent3d {
339 width,
340 height,
341 depth_or_array_layers: 1,
342 },
343 );
344 return;
345 }
346
347 let mut padded = vec![0u8; padded_row_bytes * height as usize];
348 for y in 0..height as usize {
349 let src = &rgba[y * row_bytes..(y + 1) * row_bytes];
350 let dst = &mut padded[y * padded_row_bytes..y * padded_row_bytes + row_bytes];
351 dst.copy_from_slice(src);
352 }
353
354 self.queue.write_texture(
355 wgpu::ImageCopyTexture {
356 texture: &self.texture,
357 mip_level: 0,
358 origin: wgpu::Origin3d::ZERO,
359 aspect: wgpu::TextureAspect::All,
360 },
361 &padded,
362 wgpu::ImageDataLayout {
363 offset: 0,
364 bytes_per_row: Some(padded_row_bytes as u32),
365 rows_per_image: Some(height),
366 },
367 wgpu::Extent3d {
368 width,
369 height,
370 depth_or_array_layers: 1,
371 },
372 );
373 }
374
375 fn render(&mut self) -> Result<()> {
376 let output = self.surface.get_current_texture().context("get_current_texture")?;
377 let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
378
379 let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
380 label: Some("encoder"),
381 });
382
383 {
384 let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
385 label: Some("render_pass"),
386 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
387 view: &view,
388 resolve_target: None,
389 ops: wgpu::Operations {
390 load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
391 store: wgpu::StoreOp::Store,
392 },
393 })],
394 depth_stencil_attachment: None,
395 timestamp_writes: None,
396 occlusion_query_set: None,
397 });
398
399 rpass.set_pipeline(&self.pipeline);
400 rpass.set_bind_group(0, &self.bind_group, &[]);
401 rpass.set_vertex_buffer(0, self.vbuf.slice(..));
402 rpass.set_index_buffer(self.ibuf.slice(..), wgpu::IndexFormat::Uint16);
403 rpass.draw_indexed(0..self.icount, 0, 0..1);
404 }
405
406 self.queue.submit(Some(encoder.finish()));
407 output.present();
408 Ok(())
409 }
410}
411
412fn main() -> Result<()> {
413 env_logger::init_from_env(env_logger::Env::default().default_filter_or("info"));
414
415 let mut args = env::args().skip(1);
416 let path = args.next().context("Usage: video-player-wgpu <file.mp4>")?;
417
418 let event_loop = EventLoop::new()?;
419 let window = Arc::new(
420 WindowBuilder::new()
421 .with_title("video-player-wgpu")
422 .build(&event_loop)?,
423 );
424
425 let mut state = pollster::block_on(State::new(window.clone(), &path))?;
426
427 event_loop.run(move |event, elwt| {
428 elwt.set_control_flow(winit::event_loop::ControlFlow::Poll);
429
430 match event {
431 Event::WindowEvent { event, .. } => match event {
432 WindowEvent::CloseRequested => elwt.exit(),
433 WindowEvent::Resized(s) => state.resize(s),
434 WindowEvent::RedrawRequested => {
436 if let Err(e) = state.update().and_then(|_| state.render()) {
437 log::error!("render error: {e:?}");
438 }
439 }
440 _ => {}
441 },
442 Event::AboutToWait => {
443 window.request_redraw();
444 }
445 _ => {}
446 }
447 })?;
448
449 Ok(())
450}