diff --git a/scripts/txt2vid.py b/scripts/txt2vid.py
index 1edcfe4..28b5c9b 100644
--- a/scripts/txt2vid.py
+++ b/scripts/txt2vid.py
@@ -946,6 +946,7 @@ def diffuse(
     num_inference_steps,
     cfg_scale,
     eta,
+    fps=30
 ):
 
     torch_device = cond_latents.get_device()
@@ -1055,8 +1056,8 @@ def diffuse(
             speed = "it/s"
             duration = 1 / duration
 
-        #
-        total_frames = (st.session_state.sampling_steps + st.session_state.num_inference_steps) * st.session_state.max_duration_in_seconds
+        # perhaps add an option to set fps within the ui eventually
+        total_frames = st.session_state.max_duration_in_seconds * fps
 
         total_steps = st.session_state.sampling_steps + st.session_state.num_inference_steps
         if i > st.session_state.sampling_steps:
@@ -1195,13 +1196,13 @@ def load_diffusers_model(weights_path,torch_device):
         st.session_state["progress_bar_text"].error(e)
 
 #
-def save_video_to_disk(frames, seeds, sanitized_prompt, fps=6,save_video=True, outdir='outputs'):
+def save_video_to_disk(frames, seeds, sanitized_prompt, fps=30,save_video=True, outdir='outputs'):
     if save_video:
         # write video to memory
         #output = io.BytesIO()
         #writer = imageio.get_writer(os.path.join(os.getcwd(), st.session_state['defaults'].general.outdir, "txt2vid"), im, extension=".mp4", fps=30)
         #try:
-        video_path = os.path.join(os.getcwd(), outdir, "txt2vid",f"{seeds}_{sanitized_prompt}{datetime.now().strftime('%Y%m-%d%H-%M%S-') + str(uuid4())[:8]}.mp4")
+        video_path = os.path.join(os.getcwd(), outdir, "txt2vid",f"{seeds}_{sanitized_prompt}{datetime.datetime.now().strftime('%Y%m-%d%H-%M%S-') + str(uuid4())[:8]}.mp4")
         writer = imageio.get_writer(video_path, fps=fps)
         for frame in frames:
             writer.append_data(frame)
@@ -1482,9 +1483,9 @@ def txt2vid(
     #)
 
     # old code
-    total_frames = (st.session_state.sampling_steps + st.session_state.num_inference_steps) * st.session_state.max_duration_in_seconds
+    total_frames = st.session_state.max_duration_in_seconds * fps
 
-    while second_count < max_duration_in_seconds:
+    while frame_index+1 <= total_frames:
         st.session_state["frame_duration"] = 0
         st.session_state["frame_speed"] = 0
         st.session_state["current_frame"] = frame_index
@@ -1506,7 +1507,7 @@ def txt2vid(
         #init = slerp(gpu, float(t), init1, init2)
 
         with autocast("cuda"):
-            image = diffuse(server_state["pipe"], cond_embeddings, init, num_inference_steps, cfg_scale, eta)
+            image = diffuse(server_state["pipe"], cond_embeddings, init, num_inference_steps, cfg_scale, eta, fps=fps)
 
         if st.session_state["save_individual_images"] and not st.session_state["use_GFPGAN"] and not st.session_state["use_RealESRGAN"]:
             #im = Image.fromarray(image)
@@ -1601,7 +1602,7 @@ def layout():
                 placeholder = "A corgi wearing a top hat as an oil painting."
                 prompt = st.text_area("Input Text","", placeholder=placeholder, height=54)
                 sygil_suggestions.suggestion_area(placeholder)
-                
+
                 if "defaults" in st.session_state:
                     if st.session_state['defaults'].admin.global_negative_prompt:
                         prompt += f"### {st.session_state['defaults'].admin.global_negative_prompt}"
@@ -1914,9 +1915,10 @@ def layout():
                                             beta_schedule=beta_scheduler_type, starting_image=None)
 
                     if video and save_video_on_stop:
+                        if os.path.exists(video): # temporary solution to bypass exception
                         # show video preview on the UI after we hit the stop button
                         # currently not working as session_state is cleared on StopException
-                        preview_video.video(open(video, 'rb').read())
+                            preview_video.video(open(video, 'rb').read())
 
                     #message.success('Done!', icon="✅")
                     message.success('Render Complete: ' + info + '; Stats: ' + stats, icon="✅")
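
Note on the fps change: the old total_frames appears to have conflated sampler step counts with frame counts, while the new formula is simply duration × fps. The new loop condition `while frame_index+1 <= total_frames:` is equivalent to `frame_index < total_frames`, so generation now stops exactly when the frame budget is reached instead of after a wall-clock second count. A minimal standalone sketch of the new accounting (assuming imageio with its ffmpeg backend is installed; expected_frame_count and write_video are illustrative names, not functions from this script):

    import imageio
    import numpy as np

    def expected_frame_count(max_duration_in_seconds, fps=30):
        # Mirrors the corrected total_frames formula: the frame budget is
        # wall-clock duration times playback rate, independent of step counts.
        return max_duration_in_seconds * fps

    def write_video(frames, video_path, fps=30):
        # Same imageio pattern save_video_to_disk() uses: open a writer at
        # the target fps and append frames one at a time, so the container
        # duration comes out to len(frames) / fps.
        writer = imageio.get_writer(video_path, fps=fps)
        for frame in frames:
            writer.append_data(frame)
        writer.close()

    # Usage: 2 seconds at 30 fps should yield exactly 60 frames.
    frames = [np.zeros((64, 64, 3), dtype=np.uint8)
              for _ in range(expected_frame_count(2, fps=30))]
    write_video(frames, "demo.mp4", fps=30)

The `datetime.datetime.now()` fix implies the script imports the module as `import datetime` rather than `from datetime import datetime`; with a module-level import, the bare `datetime.now()` raises AttributeError.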