Commit 6356599

Merge pull request #118 from benrugg/pullrequests/AyaanShah2204/main

benrugg authored Jul 3, 2023
2 parents baf967b + 1f6e961

Showing 8 changed files with 453 additions and 4 deletions.
2 changes: 1 addition & 1 deletion __init__.py
@@ -2,7 +2,7 @@
"name": "AI Render - Stable Diffusion in Blender",
"description": "Create amazing images using Stable Diffusion AI",
"author": "Ben Rugg",
"version": (0, 8, 0),
"version": (0, 9, 0),
"blender": (3, 0, 0),
"location": "Render Properties > AI Render",
"warning": "",
216 changes: 216 additions & 0 deletions operators.py
@@ -540,6 +540,191 @@ def sd_upscale(scene):
return True


# Inpainting
def sd_inpaint(scene):
"""Post to the API to generate a Stable Diffusion image with inpainting, and then process it"""
props = scene.air_props

# get the prompt if we haven't been given one
if props.use_animated_prompts:
prompt, negative_prompt = validate_and_process_animated_prompt_text_for_single_frame(scene, scene.frame_current)
if not prompt:
return False
else:
prompt = get_full_prompt(scene)
negative_prompt = props.negative_prompt_text.strip()


# validate the parameters we will send
if not validate_params(scene, prompt):
return False

# generate a new seed, if we want a random one
generate_new_random_seed(scene)

# prepare the output filenames
timestamp = int(time.time())
before_output_filename_prefix = f"ai-render-{timestamp}-1-before"
after_output_filename_prefix = f"ai-render-{timestamp}-2-inpainted"
animation_output_filename_prefix = "ai-render-"

# if we want to use the last SD image, try loading it now
if not props.last_generated_image_filename:
return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
try:
img_file = open(props.last_generated_image_filename, 'rb')
except:
return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")

# load mask here
if props.inpaint_mask_path == "":
return handle_error("Couldn't find the Inpaint Mask File", "inpaint_mask_path")
try:
mask_file = open(props.inpaint_mask_path, 'rb')
except:
return handle_error("Couldn't load the uploaded inpaint mask file", "inpaint_mask_path")

# prepare data for the API request
params = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"width": utils.get_output_width(scene),
"height": utils.get_output_height(scene),
"seed": props.seed,
"cfg_scale": props.cfg_scale,
"steps": props.steps,
"is_full_res" : props.inpaint_full_res,
"full_res_padding" : props.inpaint_padding,
}

# get the backend we're using
sd_backend = utils.get_active_backend()

# send to whichever API we're using
start_time = time.time()
generated_image_file = sd_backend.inpaint(params, img_file, mask_file, after_output_filename_prefix, props)

# if we didn't get a successful image, stop here (an error will have been handled by the api function)
if not generated_image_file:
return False

# autosave the after image, if we should
if utils.should_autosave_after_image(props):
generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)

# store this image filename as the last generated image
props.last_generated_image_filename = generated_image_file

# if we're rendering an animation manually, save the image to the animation output path
if props.is_rendering_animation_manually:
generated_image_file = save_animation_image(scene, animation_output_filename_prefix, generated_image_file)

# load the image into our scene
try:
img = bpy.data.images.load(generated_image_file, check_existing=False)
except:
return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

# view the image in the AIR workspace
try:
utils.view_sd_result_in_air_image_editor(img)
except:
return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

# return success
return True


# Outpainting
def sd_outpaint(scene):
"""Post to the API to generate a Stable Diffusion image with outpainting, and then process it"""
props = scene.air_props

# get the prompt if we haven't been given one
if props.use_animated_prompts:
prompt, negative_prompt = validate_and_process_animated_prompt_text_for_single_frame(scene, scene.frame_current)
if not prompt:
return False
else:
prompt = get_full_prompt(scene)
negative_prompt = props.negative_prompt_text.strip()


# validate the parameters we will send
if not validate_params(scene, prompt):
return False

# generate a new seed, if we want a random one
generate_new_random_seed(scene)

# prepare the output filenames
timestamp = int(time.time())
before_output_filename_prefix = f"ai-render-{timestamp}-1-before"
after_output_filename_prefix = f"ai-render-{timestamp}-2-outpainted"
animation_output_filename_prefix = "ai-render-"

# if we want to use the last SD image, try loading it now
if not props.last_generated_image_filename:
return handle_error("Couldn't find the last Stable Diffusion image", "last_generated_image_filename")
try:
img_file = open(props.last_generated_image_filename, 'rb')
except:
return handle_error("Couldn't load the last Stable Diffusion image. It's probably been deleted or moved. You'll need to restore it or render a new image.", "load_last_generated_image")


# prepare data for the API request
params = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"width": utils.get_output_width(scene),
"height": utils.get_output_height(scene),
"seed": props.seed,
"cfg_scale": props.cfg_scale,
"steps": props.steps,
"pixels": props.outpaint_pixels_to_expand,
"mask_blur": props.outpaint_mask_blur,
"directions": [props.outpaint_direction],
"noise_q": props.outpaint_noise_q,
"color_variation": props.outpaint_color_variation,
}

# get the backend we're using
sd_backend = utils.get_active_backend()

# send to whichever API we're using
start_time = time.time()
generated_image_file = sd_backend.outpaint(params, img_file, after_output_filename_prefix, props)

# if we didn't get a successful image, stop here (an error will have been handled by the api function)
if not generated_image_file:
return False

# autosave the after image, if we should
if utils.should_autosave_after_image(props):
generated_image_file = save_after_image(scene, after_output_filename_prefix, generated_image_file)

# store this image filename as the last generated image
props.last_generated_image_filename = generated_image_file

# if we're rendering an animation manually, save the image to the animation output path
if props.is_rendering_animation_manually:
generated_image_file = save_animation_image(scene, animation_output_filename_prefix, generated_image_file)

# load the image into our scene
try:
img = bpy.data.images.load(generated_image_file, check_existing=False)
except:
return handle_error("Couldn't load the image from Stable Diffusion", "load_sd_image")

# view the image in the AIR workspace
try:
utils.view_sd_result_in_air_image_editor(img)
except:
return handle_error("Couldn't switch the view to the image from Stable Diffusion", "view_sd_image")

# return success
return True


class AIR_OT_enable(bpy.types.Operator):
"Enable AI Render in this scene"
@@ -970,6 +1155,35 @@ def execute(self, context):
return {'FINISHED'}


class AIR_OT_inpaint_from_last_sd_image(bpy.types.Operator):
"Inpaint a new Stable Diffusion image - without re-rendering - using the most recent Stable Diffusion image as the starting point"
bl_idname = "ai_render.inpaint_from_last_sd_image"
bl_label = "Inpaint Image From Last AI Image"

def execute(self, context):
do_pre_render_setup(context.scene)
do_pre_api_setup(context.scene)

# post to the api (on a different thread, outside the operator)
task_queue.add(functools.partial(sd_inpaint, context.scene))

return {'FINISHED'}


class AIR_OT_outpaint_from_last_sd_image(bpy.types.Operator):
"Inpaint a new Stable Diffusion image - without re-rendering - using the most recent Stable Diffusion image as the starting point"
bl_idname = "ai_render.outpaint_from_last_sd_image"
bl_label = "Outpaint Image From Last AI Image"

def execute(self, context):
do_pre_render_setup(context.scene)
do_pre_api_setup(context.scene)

# post to the api (on a different thread, outside the operator)
task_queue.add(functools.partial(sd_outpaint, context.scene))

return {'FINISHED'}


classes = [
AIR_OT_enable,
@@ -989,6 +1203,8 @@ def execute(self, context):
AIR_OT_automatic1111_load_controlnet_models,
AIR_OT_automatic1111_load_controlnet_modules,
AIR_OT_automatic1111_load_controlnet_models_and_modules,
AIR_OT_inpaint_from_last_sd_image,
AIR_OT_outpaint_from_last_sd_image,
]


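Both new operators register a bl_idname in the ai_render namespace and are appended to the classes list above, so once the add-on is enabled they should be callable from Blender's Python console. A minimal sketch (it assumes a Stable Diffusion image has already been generated, so last_generated_image_filename is set, and that an inpaint mask path has been configured):

import bpy

# queue inpainting from the last generated image (runs sd_inpaint via the task queue)
bpy.ops.ai_render.inpaint_from_last_sd_image()

# or expand the last generated image outward instead (runs sd_outpaint via the task queue)
bpy.ops.ai_render.outpaint_from_last_sd_image()
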
63 changes: 63 additions & 0 deletions properties.py
@@ -39,6 +39,15 @@ def get_available_controlnet_modules(self, context):
return []


def get_outpaint_directions(self, context):
return [
("up", "up", ""),
("down", "down", ""),
("left", "left", ""),
("right", "right", ""),
]


def ensure_sampler(context):
# """Ensure that the sampler is set to a valid value"""
scene = context.scene
@@ -275,6 +284,60 @@ class AIRProperties(bpy.types.PropertyGroup):
items=get_available_controlnet_modules,
description="Which ControlNet module (preprocessor) to use (these come with the ControlNet extension)",
)
inpaint_mask_path: bpy.props.StringProperty(
name="Inpaint Mask Path",
default="",
description="Upload Inpaint Mask",
subtype="FILE_PATH",
)
inpaint_full_res: bpy.props.BoolProperty(
name="Inpaint at Full Resolution",
default=True,
description="",
)
inpaint_padding: bpy.props.IntProperty(
name="Inpaint Padding",
max=256,
min=0,
default=32,
step=4,
description="",
)
outpaint_direction: bpy.props.EnumProperty(
name="Outpaint Direction",
items=get_outpaint_directions,
description="The image will expand in this direction",
)
outpaint_pixels_to_expand: bpy.props.IntProperty(
name="Outpaint Pixels to Expand",
min=8,
max=256,
step=8,
default=8,
description="",
)
outpaint_mask_blur: bpy.props.IntProperty(
name="Outpaint Mask Blur",
description="this changes how much the inpainting mask is blurred. This helps to avoid sharp edges on the image.",
min=0,
max=64,
step=1,
default=0,
)
outpaint_noise_q: bpy.props.FloatProperty(
min=0.0,
max=4.0,
default=1.0,
step=0.01,
name="Outpaint Noise Quotient",
)
outpaint_color_variation: bpy.props.FloatProperty(
min=0.0,
max=1.0,
default=0.05,
step=0.01,
name="Outpaint Color Variation",
)


classes = [
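
The new settings live on scene.air_props alongside the existing AI Render properties, so they can also be set from Python rather than through the panel UI. A short sketch with illustrative values (the mask path is a hypothetical example; the numbers stay inside the min/max limits declared above):

import bpy

props = bpy.context.scene.air_props
props.inpaint_mask_path = "//masks/inpaint_mask.png"  # hypothetical path; the FILE_PATH subtype accepts Blender-relative paths
props.inpaint_full_res = True
props.inpaint_padding = 32             # 0-256
props.outpaint_direction = "right"     # one of "up", "down", "left", "right"
props.outpaint_pixels_to_expand = 64   # 8-256
props.outpaint_mask_blur = 8           # 0-64
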
8 changes: 8 additions & 0 deletions sd_backends/automatic1111_api.py
@@ -279,6 +279,14 @@ def supports_reloading_upscaler_models():
return True


def supports_inpainting():
return False


def supports_outpainting():
return False


def min_image_size():
return 128 * 128

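supports_inpainting and supports_outpainting extend the per-backend capability interface alongside the existing supports_reloading_upscaler_models flag, and the Automatic1111 backend opts out of both for now. Panel and operator code (in the files not shown above) can presumably gate the new features on these flags through the same utils.get_active_backend helper used in operators.py; a sketch of such a guard near the top of sd_inpaint (the error id string here is hypothetical):

# near the top of sd_inpaint (sketch; the error id string is hypothetical)
if not utils.get_active_backend().supports_inpainting():
    return handle_error("The active Stable Diffusion backend doesn't support inpainting", "inpainting_not_supported")
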
(The remaining 4 changed files were not loaded in this view.)