add elevation in call

- data/corgi.jpg  +0 -0
- pipeline_mvdream.py  +3 -4
- run_imagedream.py  +1 -1
- run_mvdream.py  +1 -1

data/corgi.jpg
ADDED

pipeline_mvdream.py
CHANGED
@@ -435,6 +435,7 @@ class MVDreamPipeline(DiffusionPipeline):
         image: Optional[np.ndarray] = None,
         height: int = 256,
         width: int = 256,
+        elevation: float = 0,
         num_inference_steps: int = 50,
         guidance_scale: float = 7.0,
         negative_prompt: str = "",
@@ -489,10 +490,8 @@ class MVDreamPipeline(DiffusionPipeline):
             None,
         )
 
-
-
-        else:
-            camera = get_camera(num_frames, elevation=15, extra_view=False).to(dtype=latents.dtype, device=device)
+        # Get camera
+        camera = get_camera(num_frames, elevation=elevation, extra_view=(image is not None)).to(dtype=latents.dtype, device=device)
         camera = camera.repeat_interleave(num_images_per_prompt, dim=0)
 
         # Prepare extra step kwargs.
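For context, a hedged usage sketch of the new keyword from the caller's side: it loads the pipeline and passes a non-default elevation, which now reaches get_camera instead of the hard-coded 15 from the removed else branch. The import path and checkpoint id below are assumptions for illustration, not part of this commit.

import torch
from pipeline_mvdream import MVDreamPipeline  # assumed import: the pipeline file sits at the repo root

# hypothetical checkpoint id; substitute the weights you actually use
pipe = MVDreamPipeline.from_pretrained("ashawkey/mvdream-sd2.1-diffusers", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# text-to-multiview call: image is None, so extra_view=(image is not None) stays False
views = pipe(
    "a cute owl 3d model",
    guidance_scale=5,
    num_inference_steps=30,
    elevation=15,  # previously fixed inside the pipeline; now chosen by the caller
)
# the result is indexable per view, as the run scripts below do with image[0] and image[2]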
run_imagedream.py
CHANGED

@@ -20,7 +20,7 @@ args = parser.parse_args()
 
 for i in range(5):
     input_image = kiui.read_image(args.image, mode='float')
-    image = pipe(args.prompt, input_image, guidance_scale=5)
+    image = pipe(args.prompt, input_image, guidance_scale=5, num_inference_steps=30, elevation=0)
     grid = np.concatenate(
         [
             np.concatenate([image[0], image[2]], axis=0),
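The hunk ends mid-expression, showing only the first column of the preview grid. A hedged sketch of how the remaining assembly and save step might look; the second-column indices, the PIL save, and the filename are assumptions, not taken from run_imagedream.py.

from PIL import Image

grid = np.concatenate(
    [
        np.concatenate([image[0], image[2]], axis=0),
        np.concatenate([image[1], image[3]], axis=0),  # assumed: the other two views
    ],
    axis=1,
)
# kiui.read_image(..., mode='float') yields floats in [0, 1]; assume the pipeline outputs do too
Image.fromarray((grid * 255).astype(np.uint8)).save(f"imagedream_{i}.jpg")  # hypothetical filename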
run_mvdream.py
CHANGED

@@ -19,7 +19,7 @@ parser.add_argument("prompt", type=str, default="a cute owl 3d model")
 args = parser.parse_args()
 
 for i in range(5):
-    image = pipe(args.prompt)
+    image = pipe(args.prompt, guidance_scale=5, num_inference_steps=30, elevation=0)
     grid = np.concatenate(
         [
             np.concatenate([image[0], image[2]], axis=0),
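Since the loop now fixes elevation=0 on every iteration, a small variation sketch (not in this commit) that reuses the loop index to compare camera heights for the same prompt; the 5-degree step is arbitrary.

for i in range(5):
    image = pipe(
        args.prompt,
        guidance_scale=5,
        num_inference_steps=30,
        elevation=5 * i,  # 0, 5, 10, 15, 20 degrees instead of a fixed 0
    )
    # grid assembly as above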