"""Replace the background of a video with a static image.

Uses MediaPipe selfie segmentation to matte the person out of each frame
of `video.mp4` and composites them over `bg.jpg`, writing the result to
`output_video.mp4`.
"""

import cv2
import mediapipe as mp
import numpy as np

# Paths
input_video_path = "video.mp4"
background_image_path = "bg.jpg"
output_video_path = "output_video.mp4"

# Initialize MediaPipe selfie segmentation.
# model_selection=1 selects the landscape model, suited to full-frame video.
mp_selfie_segmentation = mp.solutions.selfie_segmentation
segmentor = mp_selfie_segmentation.SelfieSegmentation(model_selection=1)

# Load background image
bg_image = cv2.imread(background_image_path)
if bg_image is None:
    raise FileNotFoundError(f"Background image not found: {background_image_path}")

# Video capture and writer
cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    raise FileNotFoundError(f"Could not open input video: {input_video_path}")
# Some containers report 0 fps; a 0-fps VideoWriter produces broken output,
# so fall back to a sane default.
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

# Resize background once to match the video frame size
bg_image = cv2.resize(bg_image, (width, height))

try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # MediaPipe expects RGB input; OpenCV frames are BGR.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # segmentation_mask is a float confidence map; > 0.5 marks the person.
        results = segmentor.process(rgb_frame)
        mask = results.segmentation_mask
        condition = mask > 0.5

        # Keep person pixels from the frame, everything else from the background.
        output_frame = np.where(condition[..., None], frame, bg_image)

        # Write to output video
        out.write(output_frame)
finally:
    # Release resources even if processing fails part-way through.
    cap.release()
    out.release()
    segmentor.close()

print(f"Done! Saved as {output_video_path}")
"""Replace the background of a video with a static image, another video,
or an animated GIF.

Uses MediaPipe selfie segmentation to matte the person out of each frame
of `video.mp4` and composites them over the chosen background, looping
video/GIF backgrounds as needed, and writes to `output_video.mp4`.
"""

import cv2
import mediapipe as mp
import numpy as np
import imageio
import os

# Paths
input_video_path = "video.mp4"
background_path = "1.mp4"  # Can be .webp / .mp4 / .gif
output_video_path = "output_video.mp4"

# Init MediaPipe (model_selection=1: landscape model, suited to full-frame video)
mp_selfie_segmentation = mp.solutions.selfie_segmentation
segmentor = mp_selfie_segmentation.SelfieSegmentation(model_selection=1)

# Foreground video
cap = cv2.VideoCapture(input_video_path)
if not cap.isOpened():
    raise FileNotFoundError(f"Could not open input video: {input_video_path}")
# Some containers report 0 fps; fall back so the VideoWriter stays valid.
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))

# Detect background type from the file extension
ext = os.path.splitext(background_path)[1].lower()
bg_image = None
bg_frames = []
bg_cap = None
bg_is_video = False

if ext in ('.jpg', '.jpeg', '.png', '.webp'):
    bg_image = cv2.imread(background_path)
    if bg_image is None:
        raise FileNotFoundError(f"Background image not found: {background_path}")
    bg_image = cv2.resize(bg_image, (width, height))
elif ext in ('.mp4', '.avi', '.mov'):
    bg_cap = cv2.VideoCapture(background_path)
    if not bg_cap.isOpened():
        raise FileNotFoundError(f"Could not open background video: {background_path}")
    bg_is_video = True
elif ext == '.gif':
    gif = imageio.mimread(background_path)
    if not gif:
        raise ValueError("GIF has no frames.")
    # imageio may yield RGB or RGBA frames; cvtColor raises on 4-channel
    # input with COLOR_RGB2BGR, so pick the conversion per frame.
    for gif_frame in gif:
        if gif_frame.ndim == 3 and gif_frame.shape[2] == 4:
            code = cv2.COLOR_RGBA2BGR
        else:
            code = cv2.COLOR_RGB2BGR
        bg_frames.append(cv2.resize(cv2.cvtColor(gif_frame, code), (width, height)))
else:
    raise ValueError(f"Unsupported background format: {ext}")

bg_frame_index = 0

try:
    # Frame loop
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Get this iteration's background frame
        if bg_is_video:
            ret_bg, bg_frame = bg_cap.read()
            if not ret_bg:
                # Loop the background video from its first frame.
                bg_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                ret_bg, bg_frame = bg_cap.read()
                if not ret_bg:
                    # Seeking back still yielded nothing: fail loudly instead
                    # of crashing inside cv2.resize on a None frame.
                    raise RuntimeError("Background video has no readable frames.")
            bg_frame = cv2.resize(bg_frame, (width, height))
        elif bg_frames:
            bg_frame = bg_frames[bg_frame_index % len(bg_frames)]
            bg_frame_index += 1
        else:
            bg_frame = bg_image

        # Segment: MediaPipe wants RGB; mask > 0.5 marks the person.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = segmentor.process(rgb_frame)
        condition = results.segmentation_mask > 0.5

        # Composite person over the chosen background
        output_frame = np.where(condition[..., None], frame, bg_frame)
        out.write(output_frame)
finally:
    # Release resources even if processing fails part-way through.
    cap.release()
    out.release()
    if bg_cap is not None:
        bg_cap.release()
    segmentor.close()

print(f"Done! Saved as {output_video_path}")