Update app.py
app.py CHANGED
@@ -25,7 +25,7 @@ import time
 start_time = time.time()
 
 class AnimationConfig:
-    def __init__(self, driven_audio_path, source_image_path, result_folder,pose_style,expression_scale,enhancer,still,preprocess,ref_pose_video_path):
+    def __init__(self, driven_audio_path, source_image_path, result_folder,pose_style,expression_scale,enhancer,still,preprocess,ref_pose_video_path,size):
         self.driven_audio = driven_audio_path
         self.source_image = source_image_path
         self.ref_eyeblink = ref_pose_video_path
@@ -34,7 +34,7 @@ class AnimationConfig:
         self.result_dir = result_folder
         self.pose_style = pose_style
         self.batch_size = 2
-        self.size =
+        self.size = size
         self.expression_scale = expression_scale
         self.input_yaw = None
         self.input_pitch = None
@@ -207,6 +207,7 @@ async def generate_video():
     target_language = request.form.get('target_language', 'original_text')
     print('target_language',target_language)
     pose_style = int(request.form.get('pose_style', 1))
+    size = int(request.form.get('size', 256))
     expression_scale = int(request.form.get('expression_scale', 1))
     enhancer = request.form.get('enhancer', None)
     voice_gender = request.form.get('voice_gender', 'male')
@@ -281,7 +282,7 @@ async def generate_video():
     print('ref_pose_video_path',ref_pose_video_path)
 
     # Example of using the class with some hypothetical paths
-    args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale, enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path)
+    args = AnimationConfig(driven_audio_path=driven_audio_path, source_image_path=source_image_path, result_folder=result_folder, pose_style=pose_style, expression_scale=expression_scale, enhancer=enhancer,still=still,preprocess=preprocess,ref_pose_video_path=ref_pose_video_path,size=size)
 
     if torch.cuda.is_available() and not args.cpu:
         args.device = "cuda"