Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
") pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels")) mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur")) direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction")) noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q")) color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation")) return [info, pixels, mask_blur, direction, noise_q, color_variation] def run(self, p, _, pixels, mask_blur, direction, noise_q, color_variation): initial_seed_and_info = [None, None] process_width = p.width process_height = p.height p.mask_blur = mask_blur*4 p.inpaint_full_res = False p.inpainting_fill = 1 p.do_not_save_samples = True p.do_not_save_grid = True left = pixels if "left" in direction else 0 right = pixels if "right" in direction else 0 up = pixels if "up" in direction else 0 down = pixels if "down" in direction else 0 init_img = p.init_images[0] target_w = math.ceil((init_img.width + left + right) / 64) * 64 target_h = math.ceil((init_img.height + up + down) / 64) * 64 if left > 0: left = left * (target_w - init_img.width) // (left + right) if right > 0: right = target_w - init_img.width - left if up > 0: up = up * (target_h - init_img.height) // (up + down) if down > 0: down = target_h - init_img.height - up def expand(init, count, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False): is_horiz = is_left or is_right is_vert = is_top or is_bottom pixels_horiz = expand_pixels if is_horiz else 0 pixels_vert = expand_pixels if is_vert else 0 images_to_process = [] output_images = [] for n in range(count): res_w = init[n].width + 
pixels_horiz res_h = init[n].height + pixels_vert process_res_w = math.ceil(res_w / 64) * 64 process_res_h = math.ceil(res_h / 64) * 64 img = Image.new("RGB", (process_res_w, process_res_h)) img.paste(init[n], (pixels_horiz if is_left else 0, pixels_vert if is_top else 0)) mask = Image.new("RGB", (process_res_w, process_res_h), "white") draw = ImageDraw.Draw(mask) draw.rectangle(( expand_pixels + mask_blur if is_left else 0, expand_pixels + mask_blur if is_top else 0, mask.width - expand_pixels - mask_blur if is_right else res_w, mask.height - expand_pixels - mask_blur if is_bottom else res_h, ), fill="black") np_image = (np.asarray(img) / 255.0).astype(np.float64) np_mask = (np.asarray(mask) / 255.0).astype(np.float64) noised = get_matched_noise(np_image, np_mask, noise_q, color_variation) output_images.append(Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB")) target_width = min(process_width, init[n].width + pixels_horiz) if is_horiz else img.width target_height = min(process_height, init[n].height + pixels_vert) if is_vert else img.height p.width = target_width if is_horiz else img.width p.height = target_height if is_vert else img.height crop_region = ( 0 if is_left else output_images[n].width - target_width, 0 if is_top else output_images[n].height - target_height, target_width if is_left else output_images[n].width, target_height if is_top else output_images[n].height, ) mask = mask.crop(crop_region) p.image_mask = mask image_to_process = output_images[n].crop(crop_region) images_to_process.append(image_to_process) p.init_images = images_to_process latent_mask = Image.new("RGB", (p.width, p.height), "white") draw = ImageDraw.Draw(latent_mask) draw.rectangle(( expand_pixels + mask_blur * 2 if is_left else 0, expand_pixels + mask_blur * 2 if is_top else 0, mask.width - expand_pixels - mask_blur * 2 if is_right else res_w, mask.height - expand_pixels - mask_blur * 2 if is_bottom else res_h, ), fill="black") p.latent_mask = 
latent_mask proc = process_images(p) if initial_seed_and_info[0] is None: initial_seed_and_info[0] = proc.seed initial_seed_and_info[1] = proc.info for n in range(count): output_images[n].paste(proc.images[n], (0 if is_left else output_images[n].width - proc.images[n].width, 0 if is_top else output_images[n].height - proc.images[n].height)) output_images[n] = output_images[n].crop((0, 0, res_w, res_h)) return output_images batch_count = p.n_iter batch_size = p.batch_size p.n_iter = 1 state.job_count = batch_count * ((1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0)) all_processed_images = [] for i in range(batch_count): imgs = [init_img] * batch_size state.job = f"Batch {i + 1} out of {batch_count}" if left > 0: imgs = expand(imgs, batch_size, left, is_left=True) if right > 0: imgs = expand(imgs, batch_size, right, is_right=True) if up > 0: imgs = expand(imgs, batch_size, up, is_top=True) if down > 0: imgs = expand(imgs, batch_size, down, is_bottom=True) all_processed_images += imgs all_images = all_processed_images combined_grid_image = images.image_grid(all_processed_images) unwanted_grid_because_of_img_count = len(all_processed_images) < 2 and opts.grid_only_if_multiple if opts.return_grid and not unwanted_grid_because_of_img_count: all_images = [combined_grid_image] + all_processed_images res = Processed(p, all_images, initial_seed_and_info[0], initial_seed_and_info[1]) if opts.samples_save: for img in all_processed_images: images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.grid_format, info=res.info, p=p) if opts.grid_save and not unwanted_grid_because_of_img_count: images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p) return res ================================================ FILE: scripts/poor_mans_outpainting.py ================================================ 
import math

import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    """Outpaints an img2img image by pasting it onto a larger canvas and
    inpainting the newly exposed border tiles one at a time."""

    def title(self):
        return "Poor man's outpainting"

    def show(self, is_img2img):
        # Only meaningful for img2img: it needs an initial image to expand.
        return is_img2img

    def ui(self, is_img2img):
        if not is_img2img:
            return None

        pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
        direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))

        return [pixels, mask_blur, inpainting_fill, direction]

    def run(self, p, pixels, mask_blur, inpainting_fill, direction):
        """Expand p.init_images[0] by `pixels` in the chosen directions and
        inpaint the border tiles; returns a Processed with the combined image."""
        initial_seed = None
        initial_info = None

        p.mask_blur = mask_blur * 2
        p.inpainting_fill = inpainting_fill
        p.inpaint_full_res = False

        left = pixels if "left" in direction else 0
        right = pixels if "right" in direction else 0
        up = pixels if "up" in direction else 0
        down = pixels if "down" in direction else 0

        init_img = p.init_images[0]

        # Round the expanded canvas up to the next multiple of 64 (latent grid).
        target_w = math.ceil((init_img.width + left + right) / 64) * 64
        target_h = math.ceil((init_img.height + up + down) / 64) * 64

        # Redistribute the rounding slack between the two sides of each axis.
        if left > 0:
            left = left * (target_w - init_img.width) // (left + right)
        if right > 0:
            right = target_w - init_img.width - left

        if up > 0:
            up = up * (target_h - init_img.height) // (up + down)
        if down > 0:
            down = target_h - init_img.height - up

        img = Image.new("RGB", (target_w, target_h))
        img.paste(init_img, (left, up))

        # White = inpaint; the black rectangle protects the original image,
        # inset by 2*mask_blur so the blurred mask edge stays off it.
        mask = Image.new("L", (img.width, img.height), "white")
        draw = ImageDraw.Draw(mask)
        draw.rectangle((
            left + (mask_blur * 2 if left > 0 else 0),
            up + (mask_blur * 2 if up > 0 else 0),
            mask.width - right - (mask_blur * 2 if right > 0 else 0),
            mask.height - down - (mask_blur * 2 if down > 0 else 0)
        ), fill="black")

        # Same idea for the latent mask, with a tighter inset.
        latent_mask = Image.new("L", (img.width, img.height), "white")
        latent_draw = ImageDraw.Draw(latent_mask)
        latent_draw.rectangle((
            left + (mask_blur//2 if left > 0 else 0),
            up + (mask_blur//2 if up > 0 else 0),
            mask.width - right - (mask_blur//2 if right > 0 else 0),
            mask.height - down - (mask_blur//2 if down > 0 else 0)
        ), fill="black")

        devices.torch_gc()

        grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=pixels)
        grid_mask = images.split_grid(mask, tile_w=p.width, tile_h=p.height, overlap=pixels)
        grid_latent_mask = images.split_grid(latent_mask, tile_w=p.width, tile_h=p.height, overlap=pixels)

        p.n_iter = 1
        p.batch_size = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        work = []
        work_mask = []
        work_latent_mask = []
        work_results = []

        # Collect only the tiles that touch the new border; tiles fully inside
        # the original image need no processing.
        for (y, h, row), (_, _, row_mask), (_, _, row_latent_mask) in zip(grid.tiles, grid_mask.tiles, grid_latent_mask.tiles):
            for tiledata, tiledata_mask, tiledata_latent_mask in zip(row, row_mask, row_latent_mask):
                x, w = tiledata[0:2]

                if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
                    continue

                work.append(tiledata[2])
                work_mask.append(tiledata_mask[2])
                work_latent_mask.append(tiledata_latent_mask[2])

        batch_count = len(work)
        print(f"Poor man's outpainting will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)}.")

        state.job_count = batch_count

        for i in range(batch_count):
            p.init_images = [work[i]]
            p.image_mask = work_mask[i]
            p.latent_mask = work_latent_mask[i]

            state.job = f"Batch {i + 1} out of {batch_count}"
            processed = process_images(p)

            if initial_seed is None:
                initial_seed = processed.seed
                initial_info = processed.info

            p.seed = processed.seed + 1
            work_results += processed.images

        # Paste the processed tiles back into the grid; pad with black tiles
        # if processing was interrupted early.
        image_index = 0
        for y, h, row in grid.tiles:
            for tiledata in row:
                x, w = tiledata[0:2]

                if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
                    continue

                tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                image_index += 1

        combined_image = images.combine_grid(grid)

        if opts.samples_save:
            # Fix: this is a sample, not a grid — save with samples_format
            # (consistent with the SD-upscale script), not grid_format.
            images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.samples_format, info=initial_info, p=p)

        processed = Processed(p, [combined_image], initial_seed, initial_info)

        return processed


# ================================================ FILE: scripts/postprocessing_codeformer.py ================================================

from PIL import Image
import numpy as np

from modules import scripts_postprocessing, codeformer_model
import gradio as gr

from modules.ui_components import FormRow


class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
    """Extras-tab postprocessing step that runs CodeFormer face restoration."""
    name = "CodeFormer"
    order = 3000

    def ui(self):
        with FormRow():
            codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
            codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")

        return {
            "codeformer_visibility": codeformer_visibility,
            "codeformer_weight": codeformer_weight,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
        # visibility == 0 means the step is disabled.
        if codeformer_visibility == 0:
            return

        restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
        res = Image.fromarray(restored_img)

        # Blend the restored result back over the original by `visibility`.
        if codeformer_visibility < 1.0:
            res = Image.blend(pp.image, res, codeformer_visibility)

        pp.image = res
        pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3)
        pp.info["CodeFormer weight"] = round(codeformer_weight, 3)
================================================ FILE: scripts/postprocessing_gfpgan.py ================================================

from PIL import Image
import numpy as np

from modules import scripts_postprocessing, gfpgan_model
import gradio as gr

from modules.ui_components import FormRow


class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
    # Extras-tab postprocessing step that runs GFPGAN face restoration.
    name = "GFPGAN"
    order = 2000

    def ui(self):
        with FormRow():
            gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")

        return {
            "gfpgan_visibility": gfpgan_visibility,
        }

    def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
        # visibility == 0 disables the step entirely.
        if gfpgan_visibility == 0:
            return

        restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
        res = Image.fromarray(restored_img)

        # Partially blend the restored faces back over the original image.
        if gfpgan_visibility < 1.0:
            res = Image.blend(pp.image, res, gfpgan_visibility)

        pp.image = res
        pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3)

================================================ FILE: scripts/postprocessing_upscale.py ================================================

from PIL import Image
import numpy as np

from modules import scripts_postprocessing, shared
import gradio as gr

from modules.ui_components import FormRow

# Cache of already-upscaled images keyed by (image hash, upscaler, params);
# lets a second upscaler pass and repeated runs reuse finished work.
upscale_cache = {}


class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
    name = "Upscale"
    order = 1000

    def ui(self):
        # 0 = "Scale by" tab, 1 = "Scale to" tab; updated by the tab .select
        # handlers defined later in ui().
        selected_tab = gr.State(value=0)

        with gr.Tabs(elem_id="extras_resize_mode"):
            with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
                upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")

            with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
                with FormRow():
                    upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
                    upscaling_resize_h = gr.Number(label="Height", value=512, precision=0,
elem_id="extras_upscaling_resize_h") upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with FormRow(): extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) with FormRow(): extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) return { "upscale_mode": selected_tab, "upscale_by": upscaling_resize, "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, } def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop): if upscale_mode == 1: upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}" else: info["Postprocess upscale by"] = upscale_by cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) cached_image = upscale_cache.pop(cache_key, None) if cached_image is not None: image = cached_image else: image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path) upscale_cache[cache_key] = image if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache: upscale_cache.pop(next(iter(upscale_cache), None), None) if upscale_mode == 1 and upscale_crop: cropped = Image.new("RGB", 
(upscale_to_width, upscale_to_height)) cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2)) image = cropped info["Postprocess crop to"] = f"{image.width}x{image.height}" return image def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): if upscaler_1_name == "None": upscaler_1_name = None upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None) assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}' if not upscaler1: return if upscaler_2_name == "None": upscaler_2_name = None upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None) assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) pp.info[f"Postprocess upscaler"] = upscaler1.name if upscaler2 and upscaler_2_visibility > 0: second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) pp.info[f"Postprocess upscaler 2"] = upscaler2.name pp.image = upscaled_image def image_changed(self): upscale_cache.clear() class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): name = "Simple Upscale" order = 900 def ui(self): with FormRow(): upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2) return { "upscale_by": upscale_by, "upscaler_name": 
upscaler_name, } def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None): if upscaler_name is None or upscaler_name == "None": return upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None) assert upscaler1, f'could not find upscaler named {upscaler_name}' pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False) pp.info[f"Postprocess upscaler"] = upscaler1.name ================================================ FILE: scripts/prompt_matrix.py ================================================ import math from collections import namedtuple from copy import copy import random import modules.scripts as scripts import gradio as gr from modules import images from modules.processing import process_images, Processed from modules.shared import opts, cmd_opts, state import modules.sd_samplers def draw_xy_grid(xs, ys, x_label, y_label, cell): res = [] ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys] hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs] first_processed = None state.job_count = len(xs) * len(ys) for iy, y in enumerate(ys): for ix, x in enumerate(xs): state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}" processed = cell(x, y) if first_processed is None: first_processed = processed res.append(processed.images[0]) grid = images.image_grid(res, rows=len(ys)) grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts) first_processed.images = [grid] return first_processed class Script(scripts.Script): def title(self): return "Prompt matrix" def ui(self, is_img2img): gr.HTML('Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap")) scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor")) upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index")) return [info, overlap, upscaler_index, scale_factor] def run(self, p, _, overlap, upscaler_index, scale_factor): if isinstance(upscaler_index, str): upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower()) processing.fix_seed(p) upscaler = shared.sd_upscalers[upscaler_index] p.extra_generation_params["SD upscale overlap"] = overlap p.extra_generation_params["SD upscale upscaler"] = upscaler.name initial_info = None seed = p.seed init_img = p.init_images[0] init_img = images.flatten(init_img, opts.img2img_background_color) if upscaler.name != "None": img = upscaler.scaler.upscale(init_img, scale_factor, upscaler.data_path) else: img = init_img devices.torch_gc() grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap) batch_size = p.batch_size upscale_count = p.n_iter p.n_iter = 1 p.do_not_save_grid = True p.do_not_save_samples = True work = [] for y, h, row in grid.tiles: for tiledata in row: work.append(tiledata[2]) batch_count = math.ceil(len(work) / batch_size) state.job_count = batch_count * upscale_count print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.") result_images = [] for n in range(upscale_count): start_seed = seed + n p.seed = start_seed work_results = [] for i in range(batch_count): p.batch_size = batch_size p.init_images = work[i * batch_size:(i + 1) * batch_size] state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}" processed = 
processing.process_images(p) if initial_info is None: initial_info = processed.info p.seed = processed.seed + 1 work_results += processed.images image_index = 0 for y, h, row in grid.tiles: for tiledata in row: tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height)) image_index += 1 combined_image = images.combine_grid(grid) result_images.append(combined_image) if opts.samples_save: images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p) processed = Processed(p, result_images, seed, initial_info) return processed ================================================ FILE: scripts/xyz_grid.py ================================================ from collections import namedtuple from copy import copy from itertools import permutations, chain import random import csv from io import StringIO from PIL import Image import numpy as np import modules.scripts as scripts import gradio as gr from modules import images, paths, sd_samplers, processing, sd_models, sd_vae from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.sd_samplers import modules.sd_models import modules.sd_vae import glob import os import re from modules.ui_components import ToolButton fill_values_symbol = "\U0001f4d2" # 📒 AxisInfo = namedtuple('AxisInfo', ['axis', 'values']) def apply_field(field): def fun(p, x, xs): setattr(p, field, x) return fun def apply_prompt(p, x, xs): if xs[0] not in p.prompt and xs[0] not in p.negative_prompt: raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.") p.prompt = p.prompt.replace(xs[0], x) p.negative_prompt = p.negative_prompt.replace(xs[0], x) def apply_order(p, x, xs): token_order = [] # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen for token 
in x:
        token_order.append((p.prompt.find(token), token))

    token_order.sort(key=lambda t: t[0])

    prompt_parts = []

    # Split the prompt up, taking out the tokens
    for _, token in token_order:
        n = p.prompt.find(token)
        prompt_parts.append(p.prompt[0:n])
        p.prompt = p.prompt[n + len(token):]

    # Rebuild the prompt with the tokens in the order we want
    prompt_tmp = ""
    for idx, part in enumerate(prompt_parts):
        prompt_tmp += part
        prompt_tmp += x[idx]
    p.prompt = prompt_tmp + p.prompt


def apply_sampler(p, x, xs):
    # Map the user-supplied sampler label to a canonical sampler name.
    sampler_name = sd_samplers.samplers_map.get(x.lower(), None)
    if sampler_name is None:
        raise RuntimeError(f"Unknown sampler: {x}")

    p.sampler_name = sampler_name


def confirm_samplers(p, xs):
    # Validate the whole axis up front so bad values fail before any rendering.
    for x in xs:
        if x.lower() not in sd_samplers.samplers_map:
            raise RuntimeError(f"Unknown sampler: {x}")


def apply_checkpoint(p, x, xs):
    # Swap the loaded model checkpoint for this cell.
    info = modules.sd_models.get_closet_checkpoint_match(x)
    if info is None:
        raise RuntimeError(f"Unknown checkpoint: {x}")
    modules.sd_models.reload_model_weights(shared.sd_model, info)


def confirm_checkpoints(p, xs):
    # Up-front validation counterpart of apply_checkpoint.
    for x in xs:
        if modules.sd_models.get_closet_checkpoint_match(x) is None:
            raise RuntimeError(f"Unknown checkpoint: {x}")


def apply_clip_skip(p, x, xs):
    # Applied via global opts rather than on `p`.
    opts.data["CLIP_stop_at_last_layers"] = x


def apply_upscale_latent_space(p, x, xs):
    # Any value other than '0' enables latent-space upscaling for hires fix.
    if x.lower().strip() != '0':
        opts.data["use_scale_latent_for_hires_fix"] = True
    else:
        opts.data["use_scale_latent_for_hires_fix"] = False


def find_vae(name: str):
    # Resolve a user-supplied VAE name to an entry of sd_vae.vae_dict;
    # 'auto'/'automatic' -> unspecified sentinel, 'none' -> None.
    if name.lower() in ['auto', 'automatic']:
        return modules.sd_vae.unspecified
    if name.lower() == 'none':
        return None
    else:
        # Substring match; shortest matching dict key wins.
        choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()]
        if len(choices) == 0:
            print(f"No VAE found for {name}; using automatic")
            return modules.sd_vae.unspecified
        else:
            return modules.sd_vae.vae_dict[choices[0]]


def apply_vae(p, x, xs):
    modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x))


def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
p.styles.extend(x.split(',')) def format_value_add_label(p, opt, x): if type(x) == float: x = round(x, 8) return f"{opt.label}: {x}" def format_value(p, opt, x): if type(x) == float: x = round(x, 8) return x def format_value_join_list(p, opt, x): return ", ".join(x) def do_nothing(p, x, xs): pass def format_nothing(p, opt, x): return "" def str_permutations(x): """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" return x class AxisOption: def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None): self.label = label self.type = type self.apply = apply self.format_value = format_value self.confirm = confirm self.cost = cost self.choices = choices class AxisOptionImg2Img(AxisOption): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.is_img2img = True class AxisOptionTxt2Img(AxisOption): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.is_img2img = False axis_options = [ AxisOption("Nothing", str, do_nothing, format_value=format_nothing), AxisOption("Seed", int, apply_field("seed")), AxisOption("Var. seed", int, apply_field("subseed")), AxisOption("Var. 
strength", float, apply_field("subseed_strength")), AxisOption("Steps", int, apply_field("steps")), AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), AxisOption("CFG Scale", float, apply_field("cfg_scale")), AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), AxisOption("Sigma noise", float, apply_field("s_noise")), AxisOption("Eta", float, apply_field("eta")), AxisOption("Clip skip", int, apply_clip_skip), AxisOption("Denoising", float, apply_field("denoising_strength")), AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond. 
Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), ] def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size): hor_texts = [[images.GridAnnotation(x)] for x in x_labels] ver_texts = [[images.GridAnnotation(y)] for y in y_labels] title_texts = [[images.GridAnnotation(z)] for z in z_labels] # Temporary list of all the images that are generated to be populated into the grid. # Will be filled with empty images for any individual step that fails to process properly image_cache = [None] * (len(xs) * len(ys) * len(zs)) processed_result = None cell_mode = "P" cell_size = (1, 1) state.job_count = len(xs) * len(ys) * len(zs) * p.n_iter def process_cell(x, y, z, ix, iy, iz): nonlocal image_cache, processed_result, cell_mode, cell_size def index(ix, iy, iz): return ix + iy * len(xs) + iz * len(xs) * len(ys) state.job = f"{index(ix, iy, iz) + 1} out of {len(xs) * len(ys) * len(zs)}" processed: Processed = cell(x, y, z) try: # this dereference will throw an exception if the image was not processed # (this happens in cases such as if the user stops the process from the UI) processed_image = processed.images[0] if processed_result is None: # Use our first valid processed result as a template container to hold our full results processed_result = copy(processed) cell_mode = processed_image.mode cell_size = processed_image.size processed_result.images = [Image.new(cell_mode, cell_size)] processed_result.all_prompts = [processed.prompt] processed_result.all_seeds = [processed.seed] processed_result.infotexts = [processed.infotexts[0]] image_cache[index(ix, iy, iz)] = processed_image if include_lone_images: processed_result.images.append(processed_image) 
processed_result.all_prompts.append(processed.prompt) processed_result.all_seeds.append(processed.seed) processed_result.infotexts.append(processed.infotexts[0]) except: image_cache[index(ix, iy, iz)] = Image.new(cell_mode, cell_size) if first_axes_processed == 'x': for ix, x in enumerate(xs): if second_axes_processed == 'y': for iy, y in enumerate(ys): for iz, z in enumerate(zs): process_cell(x, y, z, ix, iy, iz) else: for iz, z in enumerate(zs): for iy, y in enumerate(ys): process_cell(x, y, z, ix, iy, iz) elif first_axes_processed == 'y': for iy, y in enumerate(ys): if second_axes_processed == 'x': for ix, x in enumerate(xs): for iz, z in enumerate(zs): process_cell(x, y, z, ix, iy, iz) else: for iz, z in enumerate(zs): for ix, x in enumerate(xs): process_cell(x, y, z, ix, iy, iz) elif first_axes_processed == 'z': for iz, z in enumerate(zs): if second_axes_processed == 'x': for ix, x in enumerate(xs): for iy, y in enumerate(ys): process_cell(x, y, z, ix, iy, iz) else: for iy, y in enumerate(ys): for ix, x in enumerate(xs): process_cell(x, y, z, ix, iy, iz) if not processed_result: print("Unexpected error: draw_xyz_grid failed to return even a single processed image") return Processed(p, []) sub_grids = [None] * len(zs) for i in range(len(zs)): start_index = i * len(xs) * len(ys) end_index = start_index + len(xs) * len(ys) grid = images.image_grid(image_cache[start_index:end_index], rows=len(ys)) if draw_legend: grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts, margin_size) sub_grids[i] = grid if include_sub_grids and len(zs) > 1: processed_result.images.insert(i+1, grid) sub_grid_size = sub_grids[0].size z_grid = images.image_grid(sub_grids, rows=1) if draw_legend: z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]]) processed_result.images[0] = z_grid return processed_result, sub_grids class SharedSettingsStackHelper(object): def __enter__(self): 
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers self.vae = opts.sd_vae def __exit__(self, exc_type, exc_value, tb): opts.data["sd_vae"] = self.vae modules.sd_models.reload_model_weights() modules.sd_vae.reload_vae_weights() opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*") re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*") re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*") class Script(scripts.Script): def title(self): return "X/Y/Z plot" def ui(self, is_img2img): self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img] with gr.Row(): with gr.Column(scale=19): with gr.Row(): x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False) with gr.Row(): y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False) with gr.Row(): z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type")) z_values = gr.Textbox(label="Z values", lines=1, 
elem_id=self.elem_id("z_values")) fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False) with gr.Row(variant="compact", elem_id="axis_options"): with gr.Column(): draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend")) no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds")) with gr.Column(): include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images")) include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids")) with gr.Column(): margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size")) with gr.Row(variant="compact", elem_id="swap_axes"): swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button") swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button") swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button") def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values): return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values xy_swap_args = [x_type, x_values, y_type, y_values] swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args) yz_swap_args = [y_type, y_values, z_type, z_values] swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args) xz_swap_args = [x_type, x_values, z_type, z_values] swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) def fill(x_type): axis = self.current_axis_options[x_type] return ", ".join(axis.choices()) if axis.choices else gr.update() fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) 
fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values]) def select_axis(x_type): return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button]) self.infotext_fields = ( (x_type, "X Type"), (x_values, "X Values"), (y_type, "Y Type"), (y_values, "Y Values"), (z_type, "Z Type"), (z_values, "Z Values"), ) return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): if not no_fixed_seeds: modules.processing.fix_seed(p) if not opts.return_grid: p.batch_size = 1 def process_axis(opt, vals): if opt.label == 'Nothing': return [0] valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))] if opt.type == int: valslist_ext = [] for val in valslist: m = re_range.fullmatch(val) mc = re_range_count.fullmatch(val) if m is not None: start = int(m.group(1)) end = int(m.group(2))+1 step = int(m.group(3)) if m.group(3) is not None else 1 valslist_ext += list(range(start, end, step)) elif mc is not None: start = int(mc.group(1)) end = int(mc.group(2)) num = int(mc.group(3)) if mc.group(3) is not None else 1 valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()] else: valslist_ext.append(val) valslist = valslist_ext elif opt.type == float: valslist_ext = [] for val in valslist: m = re_range_float.fullmatch(val) mc = re_range_count_float.fullmatch(val) if m is not None: start = float(m.group(1)) end = float(m.group(2)) step = float(m.group(3)) if m.group(3) is not None else 1 valslist_ext += np.arange(start, end + step, step).tolist() elif mc is not None: 
start = float(mc.group(1)) end = float(mc.group(2)) num = int(mc.group(3)) if mc.group(3) is not None else 1 valslist_ext += np.linspace(start=start, stop=end, num=num).tolist() else: valslist_ext.append(val) valslist = valslist_ext elif opt.type == str_permutations: valslist = list(permutations(valslist)) valslist = [opt.type(x) for x in valslist] # Confirm options are valid before starting if opt.confirm: opt.confirm(p, valslist) return valslist x_opt = self.current_axis_options[x_type] xs = process_axis(x_opt, x_values) y_opt = self.current_axis_options[y_type] ys = process_axis(y_opt, y_values) z_opt = self.current_axis_options[z_type] zs = process_axis(z_opt, z_values) def fix_axis_seeds(axis_opt, axis_list): if axis_opt.label in ['Seed', 'Var. seed']: return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list if not no_fixed_seeds: xs = fix_axis_seeds(x_opt, xs) ys = fix_axis_seeds(y_opt, ys) zs = fix_axis_seeds(z_opt, zs) if x_opt.label == 'Steps': total_steps = sum(xs) * len(ys) * len(zs) elif y_opt.label == 'Steps': total_steps = sum(ys) * len(xs) * len(zs) elif z_opt.label == 'Steps': total_steps = sum(zs) * len(xs) * len(ys) else: total_steps = p.steps * len(xs) * len(ys) * len(zs) if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr: if x_opt.label == "Hires steps": total_steps += sum(xs) * len(ys) * len(zs) elif y_opt.label == "Hires steps": total_steps += sum(ys) * len(xs) * len(zs) elif z_opt.label == "Hires steps": total_steps += sum(zs) * len(xs) * len(ys) elif p.hr_second_pass_steps: total_steps += p.hr_second_pass_steps * len(xs) * len(ys) * len(zs) else: total_steps *= 2 total_steps *= p.n_iter image_cell_count = p.n_iter * p.batch_size cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else "" plural_s = 's' if len(zs) > 1 else '' print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on 
{len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})") shared.total_tqdm.updateTotal(total_steps) grid_infotext = [None] state.xyz_plot_x = AxisInfo(x_opt, xs) state.xyz_plot_y = AxisInfo(y_opt, ys) state.xyz_plot_z = AxisInfo(z_opt, zs) # If one of the axes is very slow to change between (like SD model # checkpoint), then make sure it is in the outer iteration of the nested # `for` loop. first_axes_processed = 'x' second_axes_processed = 'y' if x_opt.cost > y_opt.cost and x_opt.cost > z_opt.cost: first_axes_processed = 'x' if y_opt.cost > z_opt.cost: second_axes_processed = 'y' else: second_axes_processed = 'z' elif y_opt.cost > x_opt.cost and y_opt.cost > z_opt.cost: first_axes_processed = 'y' if x_opt.cost > z_opt.cost: second_axes_processed = 'x' else: second_axes_processed = 'z' elif z_opt.cost > x_opt.cost and z_opt.cost > y_opt.cost: first_axes_processed = 'z' if x_opt.cost > y_opt.cost: second_axes_processed = 'x' else: second_axes_processed = 'y' def cell(x, y, z): if shared.state.interrupted: return Processed(p, [], p.seed, "") pc = copy(p) pc.styles = pc.styles[:] x_opt.apply(pc, x, xs) y_opt.apply(pc, y, ys) z_opt.apply(pc, z, zs) res = process_images(pc) if grid_infotext[0] is None: pc.extra_generation_params = copy(pc.extra_generation_params) pc.extra_generation_params['Script'] = self.title() if x_opt.label != 'Nothing': pc.extra_generation_params["X Type"] = x_opt.label pc.extra_generation_params["X Values"] = x_values if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs]) if y_opt.label != 'Nothing': pc.extra_generation_params["Y Type"] = y_opt.label pc.extra_generation_params["Y Values"] = y_values if y_opt.label in ["Seed", "Var. 
seed"] and not no_fixed_seeds: pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) if z_opt.label != 'Nothing': pc.extra_generation_params["Z Type"] = z_opt.label pc.extra_generation_params["Z Values"] = z_values if z_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: pc.extra_generation_params["Fixed Z Values"] = ", ".join([str(z) for z in zs]) grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds) return res with SharedSettingsStackHelper(): processed, sub_grids = draw_xyz_grid( p, xs=xs, ys=ys, zs=zs, x_labels=[x_opt.format_value(p, x_opt, x) for x in xs], y_labels=[y_opt.format_value(p, y_opt, y) for y in ys], z_labels=[z_opt.format_value(p, z_opt, z) for z in zs], cell=cell, draw_legend=draw_legend, include_lone_images=include_lone_images, include_sub_grids=include_sub_grids, first_axes_processed=first_axes_processed, second_axes_processed=second_axes_processed, margin_size=margin_size ) if opts.grid_save and len(sub_grids) > 1: for sub_grid in sub_grids: images.save_image(sub_grid, p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) if opts.grid_save: images.save_image(processed.images[0], p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) return processed ================================================ FILE: style.css ================================================ .container { max-width: 100%; } .token-counter{ position: absolute; display: inline-block; right: 2em; min-width: 0 !important; width: auto; z-index: 100; } .token-counter.error span{ box-shadow: 0 0 0.0 0.3em rgba(255,0,0,0.15), inset 0 0 0.6em rgba(255,0,0,0.075); border: 2px solid rgba(255,0,0,0.4) !important; } .token-counter div{ display: inline; } .token-counter span{ padding: 0.1em 0.75em; } #sh{ min-width: 2em; min-height: 2em; max-width: 2em; 
max-height: 2em; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; margin: 0.1em 0; opacity: 0%; cursor: default; } .output-html p {margin: 0 0.5em;} .row > *, .row > .gr-form > * { min-width: min(120px, 100%); flex: 1 1 0%; } .performance { font-size: 0.85em; color: #444; } .performance p{ display: inline-block; } .performance .time { margin-right: 0; } .performance .vram { } #txt2img_generate, #img2img_generate { min-height: 4.5em; } @media screen and (min-width: 2500px) { #txt2img_gallery, #img2img_gallery { min-height: 768px; } } #txt2img_gallery img, #img2img_gallery img{ object-fit: scale-down; } #txt2img_actions_column, #img2img_actions_column { margin: 0.35rem 0.75rem 0.35rem 0; } #script_list { padding: .625rem .75rem 0 .625rem; } .justify-center.overflow-x-scroll { justify-content: left; } .justify-center.overflow-x-scroll button:first-of-type { margin-left: auto; } .justify-center.overflow-x-scroll button:last-of-type { margin-right: auto; } [id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ min-width: 2.3em; height: 2.5em; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; } #hidden_element{ display: none; } [id$=_seed_row], [id$=_subseed_row]{ gap: 0.5rem; padding: 0.6em; } [id$=_subseed_show_box]{ min-width: auto; flex-grow: 0; } [id$=_subseed_show_box] > div{ border: 0; height: 100%; } [id$=_subseed_show]{ min-width: auto; flex-grow: 0; padding: 0; } [id$=_subseed_show] label{ height: 100%; } #txt2img_actions_column, #img2img_actions_column{ gap: 0; margin-right: .75rem; } #txt2img_tools, #img2img_tools{ gap: 0.4em; } #interrogate_col{ min-width: 0 !important; max-width: 8em !important; margin-right: 1em; gap: 0; } #interrogate, #deepbooru{ margin: 0em 0.25em 0.5em 0.25em; min-width: 8em; max-width: 8em; } #style_pos_col, #style_neg_col{ min-width: 8em !important; } #txt2img_styles_row, #img2img_styles_row{ gap: 0.25em; margin-top: 0.3em; } #txt2img_styles_row > button, 
#img2img_styles_row > button{ margin: 0; } #txt2img_styles, #img2img_styles{ padding: 0; } #txt2img_styles > label > div, #img2img_styles > label > div{ min-height: 3.2em; } ul.list-none{ max-height: 35em; z-index: 2000; } .gr-form{ background: transparent; } .my-4{ margin-top: 0; margin-bottom: 0; } #resize_mode{ flex: 1.5; } button{ align-self: stretch !important; } .overflow-hidden, .gr-panel{ overflow: visible !important; } #x_type, #y_type{ max-width: 10em; } #txt2img_preview, #img2img_preview, #ti_preview{ position: absolute; width: 320px; left: 0; right: 0; margin-left: auto; margin-right: auto; margin-top: 34px; z-index: 100; border: none; border-top-left-radius: 0; border-top-right-radius: 0; } @media screen and (min-width: 768px) { #txt2img_preview, #img2img_preview, #ti_preview { position: absolute; } } @media screen and (max-width: 767px) { #txt2img_preview, #img2img_preview, #ti_preview { position: relative; } } #txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0, #ti_preview div.left-0.top-0{ display: none; } fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ position: absolute; top: -0.7em; line-height: 1.2em; padding: 0; margin: 0 0.5em; background-color: white; box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white; z-index: 300; } .dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ background-color: rgb(31, 41, 55); box-shadow: none; border: 1px solid rgba(128, 128, 128, 0.1); border-radius: 6px; padding: 0.1em 0.5em; } #txt2img_column_batch, #img2img_column_batch{ min-width: min(13.5em, 100%) !important; } #settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{ position: relative; border: none; margin-right: 8em; } #settings .gr-panel div.flex-col div.justify-between div{ position: relative; z-index: 200; } #settings{ display: block; } #settings > div{ border: none; margin-left: 10em; } 
#settings > div.flex-wrap{ float: left; display: block; margin-left: 0; width: 10em; } #settings > div.flex-wrap button{ display: block; border: none; text-align: left; } #settings_result{ height: 1.4em; margin: 0 1.2em; } input[type="range"]{ margin: 0.5em 0 -0.3em 0; } #mask_bug_info { text-align: center; display: block; margin-top: -0.75em; margin-bottom: -0.75em; } #txt2img_negative_prompt, #img2img_negative_prompt{ } /* gradio 3.8 adds opacity to progressbar which makes it blink; disable it here */ .transition.opacity-20 { opacity: 1 !important; } /* more gradio's garbage cleanup */ .min-h-\[4rem\] { min-height: unset !important; } .min-h-\[6rem\] { min-height: unset !important; } .progressDiv{ position: relative; height: 20px; background: #b4c0cc; border-radius: 3px !important; margin-bottom: -3px; } .dark .progressDiv{ background: #424c5b; } .progressDiv .progress{ width: 0%; height: 20px; background: #0060df; color: white; font-weight: bold; line-height: 20px; padding: 0 8px 0 0; text-align: right; border-radius: 3px; overflow: visible; white-space: nowrap; padding: 0 0.5em; } .livePreview{ position: absolute; z-index: 300; background-color: white; margin: -4px; } .dark .livePreview{ background-color: rgb(17 24 39 / var(--tw-bg-opacity)); } .livePreview img{ position: absolute; object-fit: contain; width: 100%; height: 100%; } #lightboxModal{ display: none; position: fixed; z-index: 1001; padding-top: 100px; left: 0; top: 0; width: 100%; height: 100%; overflow: auto; background-color: rgba(20, 20, 20, 0.95); user-select: none; -webkit-user-select: none; } .modalControls { display: grid; grid-template-columns: 32px 32px 32px 1fr 32px; grid-template-areas: "zoom tile save space close"; position: absolute; top: 0; left: 0; right: 0; padding: 16px; gap: 16px; background-color: rgba(0,0,0,0.2); } .modalClose { grid-area: close; } .modalZoom { grid-area: zoom; } .modalSave { grid-area: save; } .modalTileImage { grid-area: tile; } .modalClose, .modalZoom, 
.modalTileImage { color: white; font-size: 35px; font-weight: bold; cursor: pointer; } .modalSave { color: white; font-size: 28px; margin-top: 8px; font-weight: bold; cursor: pointer; } .modalClose:hover, .modalClose:focus, .modalSave:hover, .modalSave:focus, .modalZoom:hover, .modalZoom:focus { color: #999; text-decoration: none; cursor: pointer; } #modalImage { display: block; margin-left: auto; margin-right: auto; margin-top: auto; width: auto; } .modalImageFullscreen { object-fit: contain; height: 90%; } .modalPrev, .modalNext { cursor: pointer; position: absolute; top: 50%; width: auto; padding: 16px; margin-top: -50px; color: white; font-weight: bold; font-size: 20px; transition: 0.6s ease; border-radius: 0 3px 3px 0; user-select: none; -webkit-user-select: none; } .modalNext { right: 0; border-radius: 3px 0 0 3px; } .modalPrev:hover, .modalNext:hover { background-color: rgba(0, 0, 0, 0.8); } #imageARPreview{ position:absolute; top:0px; left:0px; border:2px solid red; background:rgba(255, 0, 0, 0.3); z-index: 900; pointer-events:none; display:none } #txt2img_generate_box, #img2img_generate_box{ position: relative; } #txt2img_interrupt, #img2img_interrupt, #txt2img_skip, #img2img_skip{ position: absolute; width: 50%; height: 100%; background: #b4c0cc; display: none; } #txt2img_interrupt, #img2img_interrupt{ left: 0; border-radius: 0.5rem 0 0 0.5rem; } #txt2img_skip, #img2img_skip{ right: 0; border-radius: 0 0.5rem 0.5rem 0; } .red { color: red; } .gallery-item { --tw-bg-opacity: 0 !important; } #context-menu{ z-index:9999; position:absolute; display:block; padding:0px 0; border:2px solid #a55000; border-radius:8px; box-shadow:1px 1px 2px #CE6400; width: 200px; } .context-menu-items{ list-style: none; margin: 0; padding: 0; } .context-menu-items a{ display:block; padding:5px; cursor:pointer; } .context-menu-items a:hover{ background: #a55000; } #quicksettings { width: fit-content; } #quicksettings > div, #quicksettings > fieldset{ max-width: 24em; min-width: 
24em; padding: 0; border: none; box-shadow: none; background: none; margin-right: 10px; } #quicksettings > div > div > div > label > span { position: relative; margin-right: 9em; margin-bottom: -1em; } canvas[key="mask"] { z-index: 12 !important; filter: invert(); mix-blend-mode: multiply; pointer-events: none; } /* gradio 3.4.1 stuff for editable scrollbar values */ .gr-box > div > div > input.gr-text-input{ position: absolute; right: 0.5em; top: -0.6em; z-index: 400; width: 6em; } #quicksettings .gr-box > div > div > input.gr-text-input { top: -1.12em; } .row.gr-compact{ overflow: visible; } #img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img, #img2img_sketch, #img2img_sketch > .h-60, #img2img_sketch > .h-60 > div, #img2img_sketch > .h-60 > div > img, #img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img, #inpaint_sketch, #inpaint_sketch > .h-60, #inpaint_sketch > .h-60 > div, #inpaint_sketch > .h-60 > div > img { height: 480px !important; max-height: 480px !important; min-height: 480px !important; } /* Extensions */ #tab_extensions table{ border-collapse: collapse; } #tab_extensions table td, #tab_extensions table th{ border: 1px solid #ccc; padding: 0.25em 0.5em; } #tab_extensions table input[type="checkbox"]{ margin-right: 0.5em; } #tab_extensions button{ max-width: 16em; } #tab_extensions input[disabled="disabled"]{ opacity: 0.5; } .extension-tag{ font-weight: bold; font-size: 95%; } #available_extensions .info{ margin: 0; } #available_extensions .date_added{ opacity: 0.85; font-size: 90%; } #image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{ min-width: auto; padding-left: 0.5em; padding-right: 0.5em; } .gr-form{ background-color: white; } .dark .gr-form{ background-color: rgb(31 41 55 / var(--tw-bg-opacity)); } .gr-button-tool, .gr-button-tool-top{ max-width: 2.5em; min-width: 2.5em !important; height: 2.4em; } 
.gr-button-tool{ margin: 0.6em 0em 0.55em 0; } .gr-button-tool-top, #settings .gr-button-tool{ margin: 1.6em 0.7em 0.55em 0; } #modelmerger_results_container{ margin-top: 1em; overflow: visible; } #modelmerger_models{ gap: 0; } #quicksettings .gr-button-tool{ margin: 0; border-color: unset; background-color: unset; } #modelmerger_interp_description>p { margin: 0!important; text-align: center; } #modelmerger_interp_description { margin: 0.35rem 0.75rem 1.23rem; } #img2img_settings > div.gr-form, #txt2img_settings > div.gr-form { padding-top: 0.9em; padding-bottom: 0.9em; } #txt2img_settings { padding-top: 1.16em; padding-bottom: 0.9em; } #img2img_settings { padding-bottom: 0.9em; } #img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{ border: none; padding-bottom: 0.5em; } footer { display: none !important; } #footer{ text-align: center; } #footer div{ display: inline-block; } #footer .versions{ font-size: 85%; opacity: 0.85; } #txtimg_hr_finalres{ min-height: 0 !important; padding: .625rem .75rem; margin-left: -0.75em } #txtimg_hr_finalres .resolution{ font-weight: bold; } #txt2img_checkboxes, #img2img_checkboxes{ margin-bottom: 0.5em; margin-left: 0em; } #txt2img_checkboxes > div, #img2img_checkboxes > div{ flex: 0; white-space: nowrap; min-width: auto; } #img2img_copy_to_img2img, #img2img_copy_to_sketch, #img2img_copy_to_inpaint, #img2img_copy_to_inpaint_sketch{ margin-left: 0em; } #axis_options { margin-left: 0em; } .inactive{ opacity: 0.5; } [id*='_prompt_container']{ gap: 0; } [id*='_prompt_container'] > div{ margin: -0.4em 0 0 0; } .gr-compact { border: none; } .dark .gr-compact{ background-color: rgb(31 41 55 / var(--tw-bg-opacity)); margin-left: 0; } .gr-compact{ overflow: visible; } .gr-compact > *{ } .gr-compact .gr-block, .gr-compact .gr-form{ border: none; box-shadow: none; } .gr-compact .gr-box{ border-radius: .5rem !important; border-width: 1px !important; } #mode_img2img > div > div{ gap: 0 
!important; } [id*='img2img_copy_to_'] { border: none; } [id*='img2img_copy_to_'] > button { } [id*='img2img_label_copy_to_'] { font-size: 1.0em; font-weight: bold; text-align: center; line-height: 2.4em; } .extra-networks > div > [id *= '_extra_']{ margin: 0.3em; } .extra-network-subdirs{ padding: 0.2em 0.35em; } .extra-network-subdirs button{ margin: 0 0.15em; } #txt2img_extra_networks .search, #img2img_extra_networks .search{ display: inline-block; max-width: 16em; margin: 0.3em; align-self: center; } #txt2img_extra_view, #img2img_extra_view { width: auto; } .extra-network-cards .nocards, .extra-network-thumbs .nocards{ margin: 1.25em 0.5em 0.5em 0.5em; } .extra-network-cards .nocards h1, .extra-network-thumbs .nocards h1{ font-size: 1.5em; margin-bottom: 1em; } .extra-network-cards .nocards li, .extra-network-thumbs .nocards li{ margin-left: 0.5em; } .extra-network-thumbs { display: flex; flex-flow: row wrap; gap: 10px; } .extra-network-thumbs .card { height: 6em; width: 6em; cursor: pointer; background-image: url('./file=html/card-no-preview.png'); background-size: cover; background-position: center center; position: relative; } .extra-network-thumbs .card:hover .additional a { display: block; } .extra-network-thumbs .actions .additional a { background-image: url('./file=html/image-update.svg'); background-repeat: no-repeat; background-size: cover; background-position: center center; position: absolute; top: 0; left: 0; width: 24px; height: 24px; display: none; font-size: 0; text-align: -9999; } .extra-network-thumbs .actions .name { position: absolute; bottom: 0; font-size: 10px; padding: 3px; width: 100%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; background: rgba(0,0,0,.5); color: white; } .extra-network-thumbs .card:hover .actions .name { white-space: normal; word-break: break-all; } .extra-network-cards .card{ display: inline-block; margin: 0.5em; width: 16em; height: 24em; box-shadow: 0 0 5px rgba(128, 128, 128, 0.5); border-radius: 
0.2em; position: relative; background-size: auto 100%; background-position: center; overflow: hidden; cursor: pointer; background-image: url('./file=html/card-no-preview.png') } .extra-network-cards .card:hover{ box-shadow: 0 0 2px 0.3em rgba(0, 128, 255, 0.35); } .extra-network-cards .card .actions .additional{ display: none; } .extra-network-cards .card .actions{ position: absolute; bottom: 0; left: 0; right: 0; padding: 0.5em; color: white; background: rgba(0,0,0,0.5); box-shadow: 0 0 0.25em 0.25em rgba(0,0,0,0.5); text-shadow: 0 0 0.2em black; } .extra-network-cards .card .actions:hover{ box-shadow: 0 0 0.75em 0.75em rgba(0,0,0,0.5) !important; } .extra-network-cards .card .actions .name{ font-size: 1.7em; font-weight: bold; line-break: anywhere; } .extra-network-cards .card .actions:hover .additional{ display: block; } .extra-network-cards .card ul{ margin: 0.25em 0 0.75em 0.25em; cursor: unset; } .extra-network-cards .card ul a{ cursor: pointer; } .extra-network-cards .card ul a:hover{ color: red; } [id*='_prompt_container'] > div { margin: 0!important; } ================================================ FILE: styles.csv ================================================ name,prompt,negative_prompt None,, naifu基础起手式,"masterpiece, best quality, ","lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" ================================================ FILE: tags/temp/emb.txt ================================================ ================================================ FILE: tags/temp/wc.txt ================================================ ================================================ FILE: test/__init__.py ================================================ ================================================ FILE: test/basic_features/__init__.py ================================================ 
================================================ FILE: test/basic_features/extras_test.py ================================================ import unittest import requests from gradio.processing_utils import encode_pil_to_base64 from PIL import Image class TestExtrasWorking(unittest.TestCase): def setUp(self): self.url_extras_single = "http://localhost:7860/sdapi/v1/extra-single-image" self.extras_single = { "resize_mode": 0, "show_extras_results": True, "gfpgan_visibility": 0, "codeformer_visibility": 0, "codeformer_weight": 0, "upscaling_resize": 2, "upscaling_resize_w": 128, "upscaling_resize_h": 128, "upscaling_crop": True, "upscaler_1": "None", "upscaler_2": "None", "extras_upscaler_2_visibility": 0, "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) } def test_simple_upscaling_performed(self): self.extras_single["upscaler_1"] = "Lanczos" self.assertEqual(requests.post(self.url_extras_single, json=self.extras_single).status_code, 200) class TestPngInfoWorking(unittest.TestCase): def setUp(self): self.url_png_info = "http://localhost:7860/sdapi/v1/extra-single-image" self.png_info = { "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) } def test_png_info_performed(self): self.assertEqual(requests.post(self.url_png_info, json=self.png_info).status_code, 200) class TestInterrogateWorking(unittest.TestCase): def setUp(self): self.url_interrogate = "http://localhost:7860/sdapi/v1/extra-single-image" self.interrogate = { "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")), "model": "clip" } def test_interrogate_performed(self): self.assertEqual(requests.post(self.url_interrogate, json=self.interrogate).status_code, 200) if __name__ == "__main__": unittest.main() ================================================ FILE: test/basic_features/img2img_test.py ================================================ import unittest import requests from gradio.processing_utils import 
encode_pil_to_base64 from PIL import Image class TestImg2ImgWorking(unittest.TestCase): def setUp(self): self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" self.simple_img2img = { "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))], "resize_mode": 0, "denoising_strength": 0.75, "mask": None, "mask_blur": 4, "inpainting_fill": 0, "inpaint_full_res": False, "inpaint_full_res_padding": 0, "inpainting_mask_invert": False, "prompt": "example prompt", "styles": [], "seed": -1, "subseed": -1, "subseed_strength": 0, "seed_resize_from_h": -1, "seed_resize_from_w": -1, "batch_size": 1, "n_iter": 1, "steps": 3, "cfg_scale": 7, "width": 64, "height": 64, "restore_faces": False, "tiling": False, "negative_prompt": "", "eta": 0, "s_churn": 0, "s_tmax": 0, "s_tmin": 0, "s_noise": 1, "override_settings": {}, "sampler_index": "Euler a", "include_init_images": False } def test_img2img_simple_performed(self): self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) def test_inpainting_masked_performed(self): self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) def test_inpainting_with_inverted_masked_performed(self): self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) self.simple_img2img["inpainting_mask_invert"] = True self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) def test_img2img_sd_upscale_performed(self): self.simple_img2img["script_name"] = "sd upscale" self.simple_img2img["script_args"] = ["", 8, "Lanczos", 2.0] self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) if __name__ == "__main__": unittest.main() ================================================ FILE: test/basic_features/txt2img_test.py 
================================================ import unittest import requests class TestTxt2ImgWorking(unittest.TestCase): def setUp(self): self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" self.simple_txt2img = { "enable_hr": False, "denoising_strength": 0, "firstphase_width": 0, "firstphase_height": 0, "prompt": "example prompt", "styles": [], "seed": -1, "subseed": -1, "subseed_strength": 0, "seed_resize_from_h": -1, "seed_resize_from_w": -1, "batch_size": 1, "n_iter": 1, "steps": 3, "cfg_scale": 7, "width": 64, "height": 64, "restore_faces": False, "tiling": False, "negative_prompt": "", "eta": 0, "s_churn": 0, "s_tmax": 0, "s_tmin": 0, "s_noise": 1, "sampler_index": "Euler a" } def test_txt2img_simple_performed(self): self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_negative_prompt_performed(self): self.simple_txt2img["negative_prompt"] = "example negative prompt" self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_complex_prompt_performed(self): self.simple_txt2img["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]" self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_not_square_image_performed(self): self.simple_txt2img["height"] = 128 self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_hrfix_performed(self): self.simple_txt2img["enable_hr"] = True self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_tiling_performed(self): self.simple_txt2img["tiling"] = True self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_with_restore_faces_performed(self): self.simple_txt2img["restore_faces"] = True self.assertEqual(requests.post(self.url_txt2img, 
json=self.simple_txt2img).status_code, 200) def test_txt2img_with_vanilla_sampler_performed(self): self.simple_txt2img["sampler_index"] = "PLMS" self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) self.simple_txt2img["sampler_index"] = "DDIM" self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_multiple_batches_performed(self): self.simple_txt2img["n_iter"] = 2 self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) def test_txt2img_batch_performed(self): self.simple_txt2img["batch_size"] = 2 self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200) if __name__ == "__main__": unittest.main() ================================================ FILE: test/basic_features/utils_test.py ================================================ import unittest import requests class UtilsTests(unittest.TestCase): def setUp(self): self.url_options = "http://localhost:7860/sdapi/v1/options" self.url_cmd_flags = "http://localhost:7860/sdapi/v1/cmd-flags" self.url_samplers = "http://localhost:7860/sdapi/v1/samplers" self.url_upscalers = "http://localhost:7860/sdapi/v1/upscalers" self.url_sd_models = "http://localhost:7860/sdapi/v1/sd-models" self.url_hypernetworks = "http://localhost:7860/sdapi/v1/hypernetworks" self.url_face_restorers = "http://localhost:7860/sdapi/v1/face-restorers" self.url_realesrgan_models = "http://localhost:7860/sdapi/v1/realesrgan-models" self.url_prompt_styles = "http://localhost:7860/sdapi/v1/prompt-styles" self.url_embeddings = "http://localhost:7860/sdapi/v1/embeddings" def test_options_get(self): self.assertEqual(requests.get(self.url_options).status_code, 200) def test_options_write(self): response = requests.get(self.url_options) self.assertEqual(response.status_code, 200) pre_value = response.json()["send_seed"] self.assertEqual(requests.post(self.url_options, json={"send_seed":not 
pre_value}).status_code, 200) response = requests.get(self.url_options) self.assertEqual(response.status_code, 200) self.assertEqual(response.json()["send_seed"], not pre_value) requests.post(self.url_options, json={"send_seed": pre_value}) def test_cmd_flags(self): self.assertEqual(requests.get(self.url_cmd_flags).status_code, 200) def test_samplers(self): self.assertEqual(requests.get(self.url_samplers).status_code, 200) def test_upscalers(self): self.assertEqual(requests.get(self.url_upscalers).status_code, 200) def test_sd_models(self): self.assertEqual(requests.get(self.url_sd_models).status_code, 200) def test_hypernetworks(self): self.assertEqual(requests.get(self.url_hypernetworks).status_code, 200) def test_face_restorers(self): self.assertEqual(requests.get(self.url_face_restorers).status_code, 200) def test_realesrgan_models(self): self.assertEqual(requests.get(self.url_realesrgan_models).status_code, 200) def test_prompt_styles(self): self.assertEqual(requests.get(self.url_prompt_styles).status_code, 200) def test_embeddings(self): self.assertEqual(requests.get(self.url_embeddings).status_code, 200) if __name__ == "__main__": unittest.main() ================================================ FILE: test/server_poll.py ================================================ import unittest import requests import time def run_tests(proc, test_dir): timeout_threshold = 240 start_time = time.time() while time.time()-start_time < timeout_threshold: try: requests.head("http://localhost:7860/") break except requests.exceptions.ConnectionError: if proc.poll() is not None: break if proc.poll() is None: if test_dir is None: test_dir = "test" suite = unittest.TestLoader().discover(test_dir, pattern="*_test.py", top_level_dir="test") result = unittest.TextTestRunner(verbosity=2).run(suite) return len(result.failures) + len(result.errors) else: print("Launch unsuccessful") return 1 ================================================ FILE: 
textual_inversion_templates/hypernetwork.txt ================================================ a photo of a [filewords] a rendering of a [filewords] a cropped photo of the [filewords] the photo of a [filewords] a photo of a clean [filewords] a photo of a dirty [filewords] a dark photo of the [filewords] a photo of my [filewords] a photo of the cool [filewords] a close-up photo of a [filewords] a bright photo of the [filewords] a cropped photo of a [filewords] a photo of the [filewords] a good photo of the [filewords] a photo of one [filewords] a close-up photo of the [filewords] a rendition of the [filewords] a photo of the clean [filewords] a rendition of a [filewords] a photo of a nice [filewords] a good photo of a [filewords] a photo of the nice [filewords] a photo of the small [filewords] a photo of the weird [filewords] a photo of the large [filewords] a photo of a cool [filewords] a photo of a small [filewords] ================================================ FILE: textual_inversion_templates/none.txt ================================================ picture ================================================ FILE: textual_inversion_templates/style.txt ================================================ a painting, art by [name] a rendering, art by [name] a cropped painting, art by [name] the painting, art by [name] a clean painting, art by [name] a dirty painting, art by [name] a dark painting, art by [name] a picture, art by [name] a cool painting, art by [name] a close-up painting, art by [name] a bright painting, art by [name] a cropped painting, art by [name] a good painting, art by [name] a close-up painting, art by [name] a rendition, art by [name] a nice painting, art by [name] a small painting, art by [name] a weird painting, art by [name] a large painting, art by [name] ================================================ FILE: textual_inversion_templates/style_filewords.txt ================================================ a painting of [filewords], art by 
[name] a rendering of [filewords], art by [name] a cropped painting of [filewords], art by [name] the painting of [filewords], art by [name] a clean painting of [filewords], art by [name] a dirty painting of [filewords], art by [name] a dark painting of [filewords], art by [name] a picture of [filewords], art by [name] a cool painting of [filewords], art by [name] a close-up painting of [filewords], art by [name] a bright painting of [filewords], art by [name] a cropped painting of [filewords], art by [name] a good painting of [filewords], art by [name] a close-up painting of [filewords], art by [name] a rendition of [filewords], art by [name] a nice painting of [filewords], art by [name] a small painting of [filewords], art by [name] a weird painting of [filewords], art by [name] a large painting of [filewords], art by [name] ================================================ FILE: textual_inversion_templates/subject.txt ================================================ a photo of a [name] a rendering of a [name] a cropped photo of the [name] the photo of a [name] a photo of a clean [name] a photo of a dirty [name] a dark photo of the [name] a photo of my [name] a photo of the cool [name] a close-up photo of a [name] a bright photo of the [name] a cropped photo of a [name] a photo of the [name] a good photo of the [name] a photo of one [name] a close-up photo of the [name] a rendition of the [name] a photo of the clean [name] a rendition of a [name] a photo of a nice [name] a good photo of a [name] a photo of the nice [name] a photo of the small [name] a photo of the weird [name] a photo of the large [name] a photo of a cool [name] a photo of a small [name] ================================================ FILE: textual_inversion_templates/subject_filewords.txt ================================================ a photo of a [name], [filewords] a rendering of a [name], [filewords] a cropped photo of the [name], [filewords] the photo of a [name], [filewords] a photo of a 
clean [name], [filewords] a photo of a dirty [name], [filewords] a dark photo of the [name], [filewords] a photo of my [name], [filewords] a photo of the cool [name], [filewords] a close-up photo of a [name], [filewords] a bright photo of the [name], [filewords] a cropped photo of a [name], [filewords] a photo of the [name], [filewords] a good photo of the [name], [filewords] a photo of one [name], [filewords] a close-up photo of the [name], [filewords] a rendition of the [name], [filewords] a photo of the clean [name], [filewords] a rendition of a [name], [filewords] a photo of a nice [name], [filewords] a good photo of a [name], [filewords] a photo of the nice [name], [filewords] a photo of the small [name], [filewords] a photo of the weird [name], [filewords] a photo of the large [name], [filewords] a photo of a cool [name], [filewords] a photo of a small [name], [filewords] ================================================ FILE: tmp/stderr.txt ================================================ ^C ================================================ FILE: tmp/stdout.txt ================================================ ================================================ FILE: tmp/tagAutocompletePath.txt ================================================ extensions/a1111-sd-webui-tagcomplete/tags ================================================ FILE: ui-config.json ================================================ { "txt2img/Prompt/visible": true, "txt2img/Prompt/value": "", "txt2img/Negative prompt/visible": true, "txt2img/Negative prompt/value": "", "txt2img/Styles/visible": true, "txt2img/Styles/value": [], "txt2img/Sampling method/visible": true, "txt2img/Sampling method/value": "Euler a", "txt2img/Sampling steps/visible": true, "txt2img/Sampling steps/value": 20, "txt2img/Sampling steps/minimum": 1, "txt2img/Sampling steps/maximum": 150, "txt2img/Sampling steps/step": 1, "txt2img/Width/visible": true, "txt2img/Width/value": 512, "txt2img/Width/minimum": 64, 
"txt2img/Width/maximum": 2048, "txt2img/Width/step": 8, "txt2img/Height/visible": true, "txt2img/Height/value": 512, "txt2img/Height/minimum": 64, "txt2img/Height/maximum": 2048, "txt2img/Height/step": 8, "txt2img/Batch count/visible": true, "txt2img/Batch count/value": 1, "txt2img/Batch count/minimum": 1, "txt2img/Batch count/maximum": 100, "txt2img/Batch count/step": 1, "txt2img/Batch size/visible": true, "txt2img/Batch size/value": 1, "txt2img/Batch size/minimum": 1, "txt2img/Batch size/maximum": 8, "txt2img/Batch size/step": 1, "txt2img/CFG Scale/visible": true, "txt2img/CFG Scale/value": 7.0, "txt2img/CFG Scale/minimum": 1.0, "txt2img/CFG Scale/maximum": 30.0, "txt2img/CFG Scale/step": 0.5, "txt2img/Seed/visible": true, "txt2img/Seed/value": -1.0, "txt2img/Extra/visible": true, "txt2img/Extra/value": false, "txt2img/Variation seed/visible": true, "txt2img/Variation seed/value": -1.0, "txt2img/Variation strength/visible": true, "txt2img/Variation strength/value": 0.0, "txt2img/Variation strength/minimum": 0, "txt2img/Variation strength/maximum": 1, "txt2img/Variation strength/step": 0.01, "txt2img/Resize seed from width/visible": true, "txt2img/Resize seed from width/value": 0, "txt2img/Resize seed from width/minimum": 0, "txt2img/Resize seed from width/maximum": 2048, "txt2img/Resize seed from width/step": 8, "txt2img/Resize seed from height/visible": true, "txt2img/Resize seed from height/value": 0, "txt2img/Resize seed from height/minimum": 0, "txt2img/Resize seed from height/maximum": 2048, "txt2img/Resize seed from height/step": 8, "txt2img/Restore faces/visible": true, "txt2img/Restore faces/value": false, "txt2img/Tiling/visible": true, "txt2img/Tiling/value": false, "txt2img/Hires. fix/visible": true, "txt2img/Hires. 
fix/value": false, "txt2img/Upscaler/visible": true, "txt2img/Upscaler/value": "Latent", "txt2img/Hires steps/visible": true, "txt2img/Hires steps/value": 0, "txt2img/Hires steps/minimum": 0, "txt2img/Hires steps/maximum": 150, "txt2img/Hires steps/step": 1, "txt2img/Denoising strength/visible": true, "txt2img/Denoising strength/value": 0.7, "txt2img/Denoising strength/minimum": 0.0, "txt2img/Denoising strength/maximum": 1.0, "txt2img/Denoising strength/step": 0.01, "txt2img/Upscale by/visible": true, "txt2img/Upscale by/value": 2.0, "txt2img/Upscale by/minimum": 1.0, "txt2img/Upscale by/maximum": 4.0, "txt2img/Upscale by/step": 0.05, "txt2img/Resize width to/visible": true, "txt2img/Resize width to/value": 0, "txt2img/Resize width to/minimum": 0, "txt2img/Resize width to/maximum": 2048, "txt2img/Resize width to/step": 8, "txt2img/Resize height to/visible": true, "txt2img/Resize height to/value": 0, "txt2img/Resize height to/minimum": 0, "txt2img/Resize height to/maximum": 2048, "txt2img/Resize height to/step": 8, "txt2img/Override settings/value": null, "customscript/additional_networks.py/txt2img/Enable/visible": true, "customscript/additional_networks.py/txt2img/Enable/value": false, "customscript/additional_networks.py/txt2img/Network module 1/visible": true, "customscript/additional_networks.py/txt2img/Network module 1/value": "LoRA", "customscript/additional_networks.py/txt2img/Model 1/visible": true, "customscript/additional_networks.py/txt2img/Model 1/value": "None", "customscript/additional_networks.py/txt2img/Weight 1/visible": true, "customscript/additional_networks.py/txt2img/Weight 1/value": 1.0, "customscript/additional_networks.py/txt2img/Weight 1/minimum": -1.0, "customscript/additional_networks.py/txt2img/Weight 1/maximum": 2.0, "customscript/additional_networks.py/txt2img/Weight 1/step": 0.05, "customscript/additional_networks.py/txt2img/Network module 2/visible": true, "customscript/additional_networks.py/txt2img/Network module 2/value": "LoRA", 
"customscript/additional_networks.py/txt2img/Model 2/visible": true, "customscript/additional_networks.py/txt2img/Model 2/value": "None", "customscript/additional_networks.py/txt2img/Weight 2/visible": true, "customscript/additional_networks.py/txt2img/Weight 2/value": 1.0, "customscript/additional_networks.py/txt2img/Weight 2/minimum": -1.0, "customscript/additional_networks.py/txt2img/Weight 2/maximum": 2.0, "customscript/additional_networks.py/txt2img/Weight 2/step": 0.05, "customscript/additional_networks.py/txt2img/Network module 3/visible": true, "customscript/additional_networks.py/txt2img/Network module 3/value": "LoRA", "customscript/additional_networks.py/txt2img/Model 3/visible": true, "customscript/additional_networks.py/txt2img/Model 3/value": "None", "customscript/additional_networks.py/txt2img/Weight 3/visible": true, "customscript/additional_networks.py/txt2img/Weight 3/value": 1.0, "customscript/additional_networks.py/txt2img/Weight 3/minimum": -1.0, "customscript/additional_networks.py/txt2img/Weight 3/maximum": 2.0, "customscript/additional_networks.py/txt2img/Weight 3/step": 0.05, "customscript/additional_networks.py/txt2img/Network module 4/visible": true, "customscript/additional_networks.py/txt2img/Network module 4/value": "LoRA", "customscript/additional_networks.py/txt2img/Model 4/visible": true, "customscript/additional_networks.py/txt2img/Model 4/value": "None", "customscript/additional_networks.py/txt2img/Weight 4/visible": true, "customscript/additional_networks.py/txt2img/Weight 4/value": 1.0, "customscript/additional_networks.py/txt2img/Weight 4/minimum": -1.0, "customscript/additional_networks.py/txt2img/Weight 4/maximum": 2.0, "customscript/additional_networks.py/txt2img/Weight 4/step": 0.05, "customscript/additional_networks.py/txt2img/Network module 5/visible": true, "customscript/additional_networks.py/txt2img/Network module 5/value": "LoRA", "customscript/additional_networks.py/txt2img/Model 5/visible": true, 
"customscript/additional_networks.py/txt2img/Model 5/value": "None", "customscript/additional_networks.py/txt2img/Weight 5/visible": true, "customscript/additional_networks.py/txt2img/Weight 5/value": 1.0, "customscript/additional_networks.py/txt2img/Weight 5/minimum": -1.0, "customscript/additional_networks.py/txt2img/Weight 5/maximum": 2.0, "customscript/additional_networks.py/txt2img/Weight 5/step": 0.05, "customscript/aesthetic.py/txt2img/Aesthetic weight/visible": true, "customscript/aesthetic.py/txt2img/Aesthetic weight/value": 0.9, "customscript/aesthetic.py/txt2img/Aesthetic weight/minimum": 0, "customscript/aesthetic.py/txt2img/Aesthetic weight/maximum": 1, "customscript/aesthetic.py/txt2img/Aesthetic weight/step": 0.01, "customscript/aesthetic.py/txt2img/Aesthetic steps/visible": true, "customscript/aesthetic.py/txt2img/Aesthetic steps/value": 5, "customscript/aesthetic.py/txt2img/Aesthetic steps/minimum": 0, "customscript/aesthetic.py/txt2img/Aesthetic steps/maximum": 50, "customscript/aesthetic.py/txt2img/Aesthetic steps/step": 1, "customscript/aesthetic.py/txt2img/Aesthetic learning rate/visible": true, "customscript/aesthetic.py/txt2img/Aesthetic learning rate/value": "0.0001", "customscript/aesthetic.py/txt2img/Slerp interpolation/visible": true, "customscript/aesthetic.py/txt2img/Slerp interpolation/value": false, "customscript/aesthetic.py/txt2img/Aesthetic imgs embedding/visible": true, "customscript/aesthetic.py/txt2img/Aesthetic imgs embedding/value": "None", "customscript/aesthetic.py/txt2img/Aesthetic text for imgs/visible": true, "customscript/aesthetic.py/txt2img/Aesthetic text for imgs/value": "", "customscript/aesthetic.py/txt2img/Slerp angle/visible": true, "customscript/aesthetic.py/txt2img/Slerp angle/value": 0.1, "customscript/aesthetic.py/txt2img/Slerp angle/minimum": 0, "customscript/aesthetic.py/txt2img/Slerp angle/maximum": 1, "customscript/aesthetic.py/txt2img/Slerp angle/step": 0.01, "customscript/aesthetic.py/txt2img/Is negative 
text/visible": true, "customscript/aesthetic.py/txt2img/Is negative text/value": false, "txt2img/Script/visible": true, "txt2img/Script/value": "None", "customscript/prompt_matrix.py/txt2img/Put variable parts at start of prompt/visible": true, "customscript/prompt_matrix.py/txt2img/Put variable parts at start of prompt/value": false, "customscript/prompt_matrix.py/txt2img/Use different seed for each picture/visible": true, "customscript/prompt_matrix.py/txt2img/Use different seed for each picture/value": false, "customscript/prompt_matrix.py/txt2img/Select prompt/visible": true, "customscript/prompt_matrix.py/txt2img/Select prompt/value": "positive", "customscript/prompt_matrix.py/txt2img/Select joining char/visible": true, "customscript/prompt_matrix.py/txt2img/Select joining char/value": "comma", "customscript/prompt_matrix.py/txt2img/Grid margins (px)/visible": true, "customscript/prompt_matrix.py/txt2img/Grid margins (px)/value": 0, "customscript/prompt_matrix.py/txt2img/Grid margins (px)/minimum": 0, "customscript/prompt_matrix.py/txt2img/Grid margins (px)/maximum": 500, "customscript/prompt_matrix.py/txt2img/Grid margins (px)/step": 2, "customscript/prompts_from_file.py/txt2img/Iterate seed every line/visible": true, "customscript/prompts_from_file.py/txt2img/Iterate seed every line/value": false, "customscript/prompts_from_file.py/txt2img/Use same random seed for all lines/visible": true, "customscript/prompts_from_file.py/txt2img/Use same random seed for all lines/value": false, "customscript/prompts_from_file.py/txt2img/List of prompt inputs/visible": true, "customscript/prompts_from_file.py/txt2img/List of prompt inputs/value": "", "customscript/xyz_grid.py/txt2img/X type/visible": true, "customscript/xyz_grid.py/txt2img/X type/value": "Seed", "customscript/xyz_grid.py/txt2img/X values/visible": true, "customscript/xyz_grid.py/txt2img/X values/value": "", "customscript/xyz_grid.py/txt2img/Y type/visible": true, "customscript/xyz_grid.py/txt2img/Y 
type/value": "Nothing", "customscript/xyz_grid.py/txt2img/Y values/visible": true, "customscript/xyz_grid.py/txt2img/Y values/value": "", "customscript/xyz_grid.py/txt2img/Z type/visible": true, "customscript/xyz_grid.py/txt2img/Z type/value": "Nothing", "customscript/xyz_grid.py/txt2img/Z values/visible": true, "customscript/xyz_grid.py/txt2img/Z values/value": "", "customscript/xyz_grid.py/txt2img/Draw legend/visible": true, "customscript/xyz_grid.py/txt2img/Draw legend/value": true, "customscript/xyz_grid.py/txt2img/Keep -1 for seeds/visible": true, "customscript/xyz_grid.py/txt2img/Keep -1 for seeds/value": false, "customscript/xyz_grid.py/txt2img/Include Sub Images/visible": true, "customscript/xyz_grid.py/txt2img/Include Sub Images/value": false, "customscript/xyz_grid.py/txt2img/Include Sub Grids/visible": true, "customscript/xyz_grid.py/txt2img/Include Sub Grids/value": false, "customscript/xyz_grid.py/txt2img/Grid margins (px)/visible": true, "customscript/xyz_grid.py/txt2img/Grid margins (px)/value": 0, "customscript/xyz_grid.py/txt2img/Grid margins (px)/minimum": 0, "customscript/xyz_grid.py/txt2img/Grid margins (px)/maximum": 500, "customscript/xyz_grid.py/txt2img/Grid margins (px)/step": 2, "img2img/Prompt/visible": true, "img2img/Prompt/value": "", "img2img/Negative prompt/visible": true, "img2img/Negative prompt/value": "", "img2img/Styles/visible": true, "img2img/Styles/value": [], "img2img/Input directory/visible": true, "img2img/Input directory/value": "", "img2img/Output directory/visible": true, "img2img/Output directory/value": "", "img2img/Inpaint batch mask directory (required for inpaint batch processing only)/visible": true, "img2img/Inpaint batch mask directory (required for inpaint batch processing only)/value": "", "img2img/Resize mode/visible": true, "img2img/Resize mode/value": "Just resize", "img2img/Mask blur/visible": true, "img2img/Mask blur/value": 4, "img2img/Mask blur/minimum": 0, "img2img/Mask blur/maximum": 64, "img2img/Mask 
blur/step": 1, "img2img/Mask transparency/value": 0, "img2img/Mask transparency/minimum": 0, "img2img/Mask transparency/maximum": 100, "img2img/Mask transparency/step": 1, "img2img/Mask mode/visible": true, "img2img/Mask mode/value": "Inpaint masked", "img2img/Masked content/visible": true, "img2img/Masked content/value": "original", "img2img/Inpaint area/visible": true, "img2img/Inpaint area/value": "Whole picture", "img2img/Only masked padding, pixels/visible": true, "img2img/Only masked padding, pixels/value": 32, "img2img/Only masked padding, pixels/minimum": 0, "img2img/Only masked padding, pixels/maximum": 256, "img2img/Only masked padding, pixels/step": 4, "img2img/Sampling method/visible": true, "img2img/Sampling method/value": "Euler a", "img2img/Sampling steps/visible": true, "img2img/Sampling steps/value": 20, "img2img/Sampling steps/minimum": 1, "img2img/Sampling steps/maximum": 150, "img2img/Sampling steps/step": 1, "img2img/Width/visible": true, "img2img/Width/value": 512, "img2img/Width/minimum": 64, "img2img/Width/maximum": 2048, "img2img/Width/step": 8, "img2img/Height/visible": true, "img2img/Height/value": 512, "img2img/Height/minimum": 64, "img2img/Height/maximum": 2048, "img2img/Height/step": 8, "img2img/Batch count/visible": true, "img2img/Batch count/value": 1, "img2img/Batch count/minimum": 1, "img2img/Batch count/maximum": 100, "img2img/Batch count/step": 1, "img2img/Batch size/visible": true, "img2img/Batch size/value": 1, "img2img/Batch size/minimum": 1, "img2img/Batch size/maximum": 8, "img2img/Batch size/step": 1, "img2img/CFG Scale/visible": true, "img2img/CFG Scale/value": 7.0, "img2img/CFG Scale/minimum": 1.0, "img2img/CFG Scale/maximum": 30.0, "img2img/CFG Scale/step": 0.5, "img2img/Image CFG Scale/value": 1.5, "img2img/Image CFG Scale/minimum": 0, "img2img/Image CFG Scale/maximum": 3.0, "img2img/Image CFG Scale/step": 0.05, "img2img/Denoising strength/visible": true, "img2img/Denoising strength/value": 0.75, "img2img/Denoising 
strength/minimum": 0.0, "img2img/Denoising strength/maximum": 1.0, "img2img/Denoising strength/step": 0.01, "img2img/Seed/visible": true, "img2img/Seed/value": -1.0, "img2img/Extra/visible": true, "img2img/Extra/value": false, "img2img/Variation seed/visible": true, "img2img/Variation seed/value": -1.0, "img2img/Variation strength/visible": true, "img2img/Variation strength/value": 0.0, "img2img/Variation strength/minimum": 0, "img2img/Variation strength/maximum": 1, "img2img/Variation strength/step": 0.01, "img2img/Resize seed from width/visible": true, "img2img/Resize seed from width/value": 0, "img2img/Resize seed from width/minimum": 0, "img2img/Resize seed from width/maximum": 2048, "img2img/Resize seed from width/step": 8, "img2img/Resize seed from height/visible": true, "img2img/Resize seed from height/value": 0, "img2img/Resize seed from height/minimum": 0, "img2img/Resize seed from height/maximum": 2048, "img2img/Resize seed from height/step": 8, "img2img/Restore faces/visible": true, "img2img/Restore faces/value": false, "img2img/Tiling/visible": true, "img2img/Tiling/value": false, "img2img/Override settings/value": null, "customscript/additional_networks.py/img2img/Enable/visible": true, "customscript/additional_networks.py/img2img/Enable/value": false, "customscript/additional_networks.py/img2img/Network module 1/visible": true, "customscript/additional_networks.py/img2img/Network module 1/value": "LoRA", "customscript/additional_networks.py/img2img/Model 1/visible": true, "customscript/additional_networks.py/img2img/Model 1/value": "None", "customscript/additional_networks.py/img2img/Weight 1/visible": true, "customscript/additional_networks.py/img2img/Weight 1/value": 1.0, "customscript/additional_networks.py/img2img/Weight 1/minimum": -1.0, "customscript/additional_networks.py/img2img/Weight 1/maximum": 2.0, "customscript/additional_networks.py/img2img/Weight 1/step": 0.05, "customscript/additional_networks.py/img2img/Network module 2/visible": 
true, "customscript/additional_networks.py/img2img/Network module 2/value": "LoRA", "customscript/additional_networks.py/img2img/Model 2/visible": true, "customscript/additional_networks.py/img2img/Model 2/value": "None", "customscript/additional_networks.py/img2img/Weight 2/visible": true, "customscript/additional_networks.py/img2img/Weight 2/value": 1.0, "customscript/additional_networks.py/img2img/Weight 2/minimum": -1.0, "customscript/additional_networks.py/img2img/Weight 2/maximum": 2.0, "customscript/additional_networks.py/img2img/Weight 2/step": 0.05, "customscript/additional_networks.py/img2img/Network module 3/visible": true, "customscript/additional_networks.py/img2img/Network module 3/value": "LoRA", "customscript/additional_networks.py/img2img/Model 3/visible": true, "customscript/additional_networks.py/img2img/Model 3/value": "None", "customscript/additional_networks.py/img2img/Weight 3/visible": true, "customscript/additional_networks.py/img2img/Weight 3/value": 1.0, "customscript/additional_networks.py/img2img/Weight 3/minimum": -1.0, "customscript/additional_networks.py/img2img/Weight 3/maximum": 2.0, "customscript/additional_networks.py/img2img/Weight 3/step": 0.05, "customscript/additional_networks.py/img2img/Network module 4/visible": true, "customscript/additional_networks.py/img2img/Network module 4/value": "LoRA", "customscript/additional_networks.py/img2img/Model 4/visible": true, "customscript/additional_networks.py/img2img/Model 4/value": "None", "customscript/additional_networks.py/img2img/Weight 4/visible": true, "customscript/additional_networks.py/img2img/Weight 4/value": 1.0, "customscript/additional_networks.py/img2img/Weight 4/minimum": -1.0, "customscript/additional_networks.py/img2img/Weight 4/maximum": 2.0, "customscript/additional_networks.py/img2img/Weight 4/step": 0.05, "customscript/additional_networks.py/img2img/Network module 5/visible": true, "customscript/additional_networks.py/img2img/Network module 5/value": "LoRA", 
"customscript/additional_networks.py/img2img/Model 5/visible": true, "customscript/additional_networks.py/img2img/Model 5/value": "None", "customscript/additional_networks.py/img2img/Weight 5/visible": true, "customscript/additional_networks.py/img2img/Weight 5/value": 1.0, "customscript/additional_networks.py/img2img/Weight 5/minimum": -1.0, "customscript/additional_networks.py/img2img/Weight 5/maximum": 2.0, "customscript/additional_networks.py/img2img/Weight 5/step": 0.05, "customscript/aesthetic.py/img2img/Aesthetic weight/visible": true, "customscript/aesthetic.py/img2img/Aesthetic weight/value": 0.9, "customscript/aesthetic.py/img2img/Aesthetic weight/minimum": 0, "customscript/aesthetic.py/img2img/Aesthetic weight/maximum": 1, "customscript/aesthetic.py/img2img/Aesthetic weight/step": 0.01, "customscript/aesthetic.py/img2img/Aesthetic steps/visible": true, "customscript/aesthetic.py/img2img/Aesthetic steps/value": 5, "customscript/aesthetic.py/img2img/Aesthetic steps/minimum": 0, "customscript/aesthetic.py/img2img/Aesthetic steps/maximum": 50, "customscript/aesthetic.py/img2img/Aesthetic steps/step": 1, "customscript/aesthetic.py/img2img/Aesthetic learning rate/visible": true, "customscript/aesthetic.py/img2img/Aesthetic learning rate/value": "0.0001", "customscript/aesthetic.py/img2img/Slerp interpolation/visible": true, "customscript/aesthetic.py/img2img/Slerp interpolation/value": false, "customscript/aesthetic.py/img2img/Aesthetic imgs embedding/visible": true, "customscript/aesthetic.py/img2img/Aesthetic imgs embedding/value": "None", "customscript/aesthetic.py/img2img/Aesthetic text for imgs/visible": true, "customscript/aesthetic.py/img2img/Aesthetic text for imgs/value": "", "customscript/aesthetic.py/img2img/Slerp angle/visible": true, "customscript/aesthetic.py/img2img/Slerp angle/value": 0.1, "customscript/aesthetic.py/img2img/Slerp angle/minimum": 0, "customscript/aesthetic.py/img2img/Slerp angle/maximum": 1, 
"customscript/aesthetic.py/img2img/Slerp angle/step": 0.01, "customscript/aesthetic.py/img2img/Is negative text/visible": true, "customscript/aesthetic.py/img2img/Is negative text/value": false, "img2img/Script/visible": true, "img2img/Script/value": "None", "customscript/img2imgalt.py/img2img/Override `Sampling method` to Euler?(this method is built for it)/visible": true, "customscript/img2imgalt.py/img2img/Override `Sampling method` to Euler?(this method is built for it)/value": true, "customscript/img2imgalt.py/img2img/Override `prompt` to the same value as `original prompt`?(and `negative prompt`)/visible": true, "customscript/img2imgalt.py/img2img/Override `prompt` to the same value as `original prompt`?(and `negative prompt`)/value": true, "customscript/img2imgalt.py/img2img/Original prompt/visible": true, "customscript/img2imgalt.py/img2img/Original prompt/value": "", "customscript/img2imgalt.py/img2img/Original negative prompt/visible": true, "customscript/img2imgalt.py/img2img/Original negative prompt/value": "", "customscript/img2imgalt.py/img2img/Override `Sampling Steps` to the same value as `Decode steps`?/visible": true, "customscript/img2imgalt.py/img2img/Override `Sampling Steps` to the same value as `Decode steps`?/value": true, "customscript/img2imgalt.py/img2img/Decode steps/visible": true, "customscript/img2imgalt.py/img2img/Decode steps/value": 50, "customscript/img2imgalt.py/img2img/Decode steps/minimum": 1, "customscript/img2imgalt.py/img2img/Decode steps/maximum": 150, "customscript/img2imgalt.py/img2img/Decode steps/step": 1, "customscript/img2imgalt.py/img2img/Override `Denoising strength` to 1?/visible": true, "customscript/img2imgalt.py/img2img/Override `Denoising strength` to 1?/value": true, "customscript/img2imgalt.py/img2img/Decode CFG scale/visible": true, "customscript/img2imgalt.py/img2img/Decode CFG scale/value": 1.0, "customscript/img2imgalt.py/img2img/Decode CFG scale/minimum": 0.0, "customscript/img2imgalt.py/img2img/Decode 
CFG scale/maximum": 15.0, "customscript/img2imgalt.py/img2img/Decode CFG scale/step": 0.1, "customscript/img2imgalt.py/img2img/Randomness/visible": true, "customscript/img2imgalt.py/img2img/Randomness/value": 0.0, "customscript/img2imgalt.py/img2img/Randomness/minimum": 0.0, "customscript/img2imgalt.py/img2img/Randomness/maximum": 1.0, "customscript/img2imgalt.py/img2img/Randomness/step": 0.01, "customscript/img2imgalt.py/img2img/Sigma adjustment for finding noise for image/visible": true, "customscript/img2imgalt.py/img2img/Sigma adjustment for finding noise for image/value": false, "customscript/loopback.py/img2img/Loops/visible": true, "customscript/loopback.py/img2img/Loops/value": 4, "customscript/loopback.py/img2img/Loops/minimum": 1, "customscript/loopback.py/img2img/Loops/maximum": 32, "customscript/loopback.py/img2img/Loops/step": 1, "customscript/loopback.py/img2img/Denoising strength change factor/visible": true, "customscript/loopback.py/img2img/Denoising strength change factor/value": 1, "customscript/loopback.py/img2img/Denoising strength change factor/minimum": 0.9, "customscript/loopback.py/img2img/Denoising strength change factor/maximum": 1.1, "customscript/loopback.py/img2img/Denoising strength change factor/step": 0.01, "customscript/loopback.py/img2img/Append interrogated prompt at each iteration/visible": true, "customscript/loopback.py/img2img/Append interrogated prompt at each iteration/value": "None", "customscript/outpainting_mk_2.py/img2img/Pixels to expand/visible": true, "customscript/outpainting_mk_2.py/img2img/Pixels to expand/value": 128, "customscript/outpainting_mk_2.py/img2img/Pixels to expand/minimum": 8, "customscript/outpainting_mk_2.py/img2img/Pixels to expand/maximum": 256, "customscript/outpainting_mk_2.py/img2img/Pixels to expand/step": 8, "customscript/outpainting_mk_2.py/img2img/Mask blur/visible": true, "customscript/outpainting_mk_2.py/img2img/Mask blur/value": 8, "customscript/outpainting_mk_2.py/img2img/Mask 
blur/minimum": 0, "customscript/outpainting_mk_2.py/img2img/Mask blur/maximum": 64, "customscript/outpainting_mk_2.py/img2img/Mask blur/step": 1, "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/visible": true, "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/value": 1.0, "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/minimum": 0.0, "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/maximum": 4.0, "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/step": 0.01, "customscript/outpainting_mk_2.py/img2img/Color variation/visible": true, "customscript/outpainting_mk_2.py/img2img/Color variation/value": 0.05, "customscript/outpainting_mk_2.py/img2img/Color variation/minimum": 0.0, "customscript/outpainting_mk_2.py/img2img/Color variation/maximum": 1.0, "customscript/outpainting_mk_2.py/img2img/Color variation/step": 0.01, "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/visible": true, "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/value": 128, "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/minimum": 8, "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/maximum": 256, "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/step": 8, "customscript/poor_mans_outpainting.py/img2img/Mask blur/visible": true, "customscript/poor_mans_outpainting.py/img2img/Mask blur/value": 4, "customscript/poor_mans_outpainting.py/img2img/Mask blur/minimum": 0, "customscript/poor_mans_outpainting.py/img2img/Mask blur/maximum": 64, "customscript/poor_mans_outpainting.py/img2img/Mask blur/step": 1, "customscript/poor_mans_outpainting.py/img2img/Masked content/visible": true, "customscript/poor_mans_outpainting.py/img2img/Masked content/value": "fill", "customscript/prompt_matrix.py/img2img/Put variable parts at start of prompt/visible": true, 
"customscript/prompt_matrix.py/img2img/Put variable parts at start of prompt/value": false, "customscript/prompt_matrix.py/img2img/Use different seed for each picture/visible": true, "customscript/prompt_matrix.py/img2img/Use different seed for each picture/value": false, "customscript/prompt_matrix.py/img2img/Select prompt/visible": true, "customscript/prompt_matrix.py/img2img/Select prompt/value": "positive", "customscript/prompt_matrix.py/img2img/Select joining char/visible": true, "customscript/prompt_matrix.py/img2img/Select joining char/value": "comma", "customscript/prompt_matrix.py/img2img/Grid margins (px)/visible": true, "customscript/prompt_matrix.py/img2img/Grid margins (px)/value": 0, "customscript/prompt_matrix.py/img2img/Grid margins (px)/minimum": 0, "customscript/prompt_matrix.py/img2img/Grid margins (px)/maximum": 500, "customscript/prompt_matrix.py/img2img/Grid margins (px)/step": 2, "customscript/prompts_from_file.py/img2img/Iterate seed every line/visible": true, "customscript/prompts_from_file.py/img2img/Iterate seed every line/value": false, "customscript/prompts_from_file.py/img2img/Use same random seed for all lines/visible": true, "customscript/prompts_from_file.py/img2img/Use same random seed for all lines/value": false, "customscript/prompts_from_file.py/img2img/List of prompt inputs/visible": true, "customscript/prompts_from_file.py/img2img/List of prompt inputs/value": "", "customscript/sd_upscale.py/img2img/Tile overlap/visible": true, "customscript/sd_upscale.py/img2img/Tile overlap/value": 64, "customscript/sd_upscale.py/img2img/Tile overlap/minimum": 0, "customscript/sd_upscale.py/img2img/Tile overlap/maximum": 256, "customscript/sd_upscale.py/img2img/Tile overlap/step": 16, "customscript/sd_upscale.py/img2img/Scale Factor/visible": true, "customscript/sd_upscale.py/img2img/Scale Factor/value": 2.0, "customscript/sd_upscale.py/img2img/Scale Factor/minimum": 1.0, "customscript/sd_upscale.py/img2img/Scale Factor/maximum": 4.0, 
"customscript/sd_upscale.py/img2img/Scale Factor/step": 0.05, "customscript/sd_upscale.py/img2img/Upscaler/visible": true, "customscript/sd_upscale.py/img2img/Upscaler/value": "None", "customscript/xyz_grid.py/img2img/X type/visible": true, "customscript/xyz_grid.py/img2img/X type/value": "Seed", "customscript/xyz_grid.py/img2img/X values/visible": true, "customscript/xyz_grid.py/img2img/X values/value": "", "customscript/xyz_grid.py/img2img/Y type/visible": true, "customscript/xyz_grid.py/img2img/Y type/value": "Nothing", "customscript/xyz_grid.py/img2img/Y values/visible": true, "customscript/xyz_grid.py/img2img/Y values/value": "", "customscript/xyz_grid.py/img2img/Z type/visible": true, "customscript/xyz_grid.py/img2img/Z type/value": "Nothing", "customscript/xyz_grid.py/img2img/Z values/visible": true, "customscript/xyz_grid.py/img2img/Z values/value": "", "customscript/xyz_grid.py/img2img/Draw legend/visible": true, "customscript/xyz_grid.py/img2img/Draw legend/value": true, "customscript/xyz_grid.py/img2img/Keep -1 for seeds/visible": true, "customscript/xyz_grid.py/img2img/Keep -1 for seeds/value": false, "customscript/xyz_grid.py/img2img/Include Sub Images/visible": true, "customscript/xyz_grid.py/img2img/Include Sub Images/value": false, "customscript/xyz_grid.py/img2img/Include Sub Grids/visible": true, "customscript/xyz_grid.py/img2img/Include Sub Grids/value": false, "customscript/xyz_grid.py/img2img/Grid margins (px)/visible": true, "customscript/xyz_grid.py/img2img/Grid margins (px)/value": 0, "customscript/xyz_grid.py/img2img/Grid margins (px)/minimum": 0, "customscript/xyz_grid.py/img2img/Grid margins (px)/maximum": 500, "customscript/xyz_grid.py/img2img/Grid margins (px)/step": 2, "extras/Input directory/visible": true, "extras/Input directory/value": "", "extras/Output directory/visible": true, "extras/Output directory/value": "", "extras/Show result images/visible": true, "extras/Show result images/value": true, 
"customscript/postprocessing_upscale.py/extras/Resize/visible": true, "customscript/postprocessing_upscale.py/extras/Resize/value": 4, "customscript/postprocessing_upscale.py/extras/Resize/minimum": 1.0, "customscript/postprocessing_upscale.py/extras/Resize/maximum": 8.0, "customscript/postprocessing_upscale.py/extras/Resize/step": 0.05, "customscript/postprocessing_upscale.py/extras/Width/visible": true, "customscript/postprocessing_upscale.py/extras/Width/value": 512, "customscript/postprocessing_upscale.py/extras/Height/visible": true, "customscript/postprocessing_upscale.py/extras/Height/value": 512, "customscript/postprocessing_upscale.py/extras/Crop to fit/visible": true, "customscript/postprocessing_upscale.py/extras/Crop to fit/value": true, "customscript/postprocessing_upscale.py/extras/Upscaler 1/visible": true, "customscript/postprocessing_upscale.py/extras/Upscaler 1/value": "None", "customscript/postprocessing_upscale.py/extras/Upscaler 2/visible": true, "customscript/postprocessing_upscale.py/extras/Upscaler 2/value": "None", "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/visible": true, "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/value": 0.0, "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/minimum": 0.0, "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/maximum": 1.0, "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/step": 0.001, "customscript/postprocessing_gfpgan.py/extras/GFPGAN visibility/visible": true, "customscript/postprocessing_gfpgan.py/extras/GFPGAN visibility/value": 0, "customscript/postprocessing_gfpgan.py/extras/GFPGAN visibility/minimum": 0.0, "customscript/postprocessing_gfpgan.py/extras/GFPGAN visibility/maximum": 1.0, "customscript/postprocessing_gfpgan.py/extras/GFPGAN visibility/step": 0.001, "customscript/postprocessing_codeformer.py/extras/CodeFormer visibility/visible": true, 
"customscript/postprocessing_codeformer.py/extras/CodeFormer visibility/value": 0, "customscript/postprocessing_codeformer.py/extras/CodeFormer visibility/minimum": 0.0, "customscript/postprocessing_codeformer.py/extras/CodeFormer visibility/maximum": 1.0, "customscript/postprocessing_codeformer.py/extras/CodeFormer visibility/step": 0.001, "customscript/postprocessing_codeformer.py/extras/CodeFormer weight (0 = maximum effect, 1 = minimum effect)/visible": true, "customscript/postprocessing_codeformer.py/extras/CodeFormer weight (0 = maximum effect, 1 = minimum effect)/value": 0, "customscript/postprocessing_codeformer.py/extras/CodeFormer weight (0 = maximum effect, 1 = minimum effect)/minimum": 0.0, "customscript/postprocessing_codeformer.py/extras/CodeFormer weight (0 = maximum effect, 1 = minimum effect)/maximum": 1.0, "customscript/postprocessing_codeformer.py/extras/CodeFormer weight (0 = maximum effect, 1 = minimum effect)/step": 0.001, "modelmerger/Primary model (A)/visible": true, "modelmerger/Primary model (A)/value": null, "modelmerger/Secondary model (B)/visible": true, "modelmerger/Secondary model (B)/value": null, "modelmerger/Tertiary model (C)/visible": true, "modelmerger/Tertiary model (C)/value": null, "modelmerger/Custom Name (Optional)/visible": true, "modelmerger/Custom Name (Optional)/value": "", "modelmerger/Multiplier (M) - set to 0 to get model A/visible": true, "modelmerger/Multiplier (M) - set to 0 to get model A/value": 0.3, "modelmerger/Multiplier (M) - set to 0 to get model A/minimum": 0.0, "modelmerger/Multiplier (M) - set to 0 to get model A/maximum": 1.0, "modelmerger/Multiplier (M) - set to 0 to get model A/step": 0.05, "modelmerger/Interpolation Method/visible": true, "modelmerger/Interpolation Method/value": "Weighted sum", "modelmerger/Checkpoint format/visible": true, "modelmerger/Checkpoint format/value": "ckpt", "modelmerger/Save as float16/visible": true, "modelmerger/Save as float16/value": false, "modelmerger/Copy config 
from/visible": true, "modelmerger/Copy config from/value": "A, B or C", "modelmerger/Bake in VAE/visible": true, "modelmerger/Bake in VAE/value": "None", "modelmerger/Discard weights with matching name/visible": true, "modelmerger/Discard weights with matching name/value": "", "train/Name/visible": true, "train/Name/value": "", "train/Initialization text/visible": true, "train/Initialization text/value": "*", "train/Number of vectors per token/visible": true, "train/Number of vectors per token/value": 1, "train/Number of vectors per token/minimum": 1, "train/Number of vectors per token/maximum": 75, "train/Number of vectors per token/step": 1, "train/Overwrite Old Embedding/visible": true, "train/Overwrite Old Embedding/value": false, "train/Enter hypernetwork layer structure/visible": true, "train/Enter hypernetwork layer structure/value": "1, 2, 1", "train/Select activation function of hypernetwork. Recommended : Swish / Linear(none)/visible": true, "train/Select activation function of hypernetwork. Recommended : Swish / Linear(none)/value": "linear", "train/Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise/visible": true, "train/Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise/value": "Normal", "train/Add layer normalization/visible": true, "train/Add layer normalization/value": false, "train/Use dropout/visible": true, "train/Use dropout/value": false, "train/Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15/visible": true, "train/Enter hypernetwork Dropout structure (or empty). 
Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15/value": "0, 0, 0", "train/Overwrite Old Hypernetwork/visible": true, "train/Overwrite Old Hypernetwork/value": false, "train/Source directory/visible": true, "train/Source directory/value": "", "train/Destination directory/visible": true, "train/Destination directory/value": "", "train/Width/visible": true, "train/Width/value": 512, "train/Width/minimum": 64, "train/Width/maximum": 2048, "train/Width/step": 8, "train/Height/visible": true, "train/Height/value": 512, "train/Height/minimum": 64, "train/Height/maximum": 2048, "train/Height/step": 8, "train/Existing Caption txt Action/visible": true, "train/Existing Caption txt Action/value": "ignore", "train/Create flipped copies/visible": true, "train/Create flipped copies/value": false, "train/Split oversized images/visible": true, "train/Split oversized images/value": false, "train/Auto focal point crop/visible": true, "train/Auto focal point crop/value": false, "train/Auto-sized crop/visible": true, "train/Auto-sized crop/value": false, "train/Use BLIP for caption/visible": true, "train/Use BLIP for caption/value": false, "train/Use deepbooru for caption/visible": true, "train/Use deepbooru for caption/value": false, "train/Split image threshold/visible": true, "train/Split image threshold/value": 0.5, "train/Split image threshold/minimum": 0.0, "train/Split image threshold/maximum": 1.0, "train/Split image threshold/step": 0.05, "train/Split image overlap ratio/visible": true, "train/Split image overlap ratio/value": 0.2, "train/Split image overlap ratio/minimum": 0.0, "train/Split image overlap ratio/maximum": 0.9, "train/Split image overlap ratio/step": 0.05, "train/Focal point face weight/visible": true, "train/Focal point face weight/value": 0.9, "train/Focal point face weight/minimum": 0.0, "train/Focal point face weight/maximum": 1.0, "train/Focal point face weight/step": 0.05, "train/Focal point entropy weight/visible": true, "train/Focal point 
entropy weight/value": 0.15, "train/Focal point entropy weight/minimum": 0.0, "train/Focal point entropy weight/maximum": 1.0, "train/Focal point entropy weight/step": 0.05, "train/Focal point edges weight/visible": true, "train/Focal point edges weight/value": 0.5, "train/Focal point edges weight/minimum": 0.0, "train/Focal point edges weight/maximum": 1.0, "train/Focal point edges weight/step": 0.05, "train/Create debug image/visible": true, "train/Create debug image/value": false, "train/Dimension lower bound/visible": true, "train/Dimension lower bound/value": 384, "train/Dimension lower bound/minimum": 64, "train/Dimension lower bound/maximum": 2048, "train/Dimension lower bound/step": 8, "train/Dimension upper bound/visible": true, "train/Dimension upper bound/value": 768, "train/Dimension upper bound/minimum": 64, "train/Dimension upper bound/maximum": 2048, "train/Dimension upper bound/step": 8, "train/Area lower bound/visible": true, "train/Area lower bound/value": 4096, "train/Area lower bound/minimum": 4096, "train/Area lower bound/maximum": 4194304, "train/Area lower bound/step": 1, "train/Area upper bound/visible": true, "train/Area upper bound/value": 409600, "train/Area upper bound/minimum": 4096, "train/Area upper bound/maximum": 4194304, "train/Area upper bound/step": 1, "train/Resizing objective/visible": true, "train/Resizing objective/value": "Maximize area", "train/Error threshold/visible": true, "train/Error threshold/value": 0.1, "train/Error threshold/minimum": 0, "train/Error threshold/maximum": 1, "train/Error threshold/step": 0.01, "train/Embedding/visible": true, "train/Embedding/value": null, "train/Hypernetwork/visible": true, "train/Hypernetwork/value": null, "train/Embedding Learning rate/visible": true, "train/Embedding Learning rate/value": "0.005", "train/Hypernetwork Learning rate/visible": true, "train/Hypernetwork Learning rate/value": "0.00001", "train/Gradient Clipping/visible": true, "train/Gradient Clipping/value": 
"disabled", "train/Batch size/visible": true, "train/Batch size/value": 1, "train/Gradient accumulation steps/visible": true, "train/Gradient accumulation steps/value": 1, "train/Dataset directory/visible": true, "train/Dataset directory/value": "", "train/Log directory/visible": true, "train/Log directory/value": "textual_inversion", "train/Prompt template/visible": true, "train/Prompt template/value": "style_filewords.txt", "train/Do not resize images/visible": true, "train/Do not resize images/value": false, "train/Max steps/visible": true, "train/Max steps/value": 100000, "train/Save an image to log directory every N steps, 0 to disable/visible": true, "train/Save an image to log directory every N steps, 0 to disable/value": 500, "train/Save a copy of embedding to log directory every N steps, 0 to disable/visible": true, "train/Save a copy of embedding to log directory every N steps, 0 to disable/value": 500, "train/Use PNG alpha channel as loss weight/visible": true, "train/Use PNG alpha channel as loss weight/value": false, "train/Save images with embedding in PNG chunks/visible": true, "train/Save images with embedding in PNG chunks/value": true, "train/Read parameters (prompt, etc...) from txt2img tab when making previews/visible": true, "train/Read parameters (prompt, etc...) 
from txt2img tab when making previews/value": false, "train/Shuffle tags by ',' when creating prompts./visible": true, "train/Shuffle tags by ',' when creating prompts./value": false, "train/Drop out tags when creating prompts./visible": true, "train/Drop out tags when creating prompts./value": 0, "train/Drop out tags when creating prompts./minimum": 0, "train/Drop out tags when creating prompts./maximum": 1, "train/Drop out tags when creating prompts./step": 0.1, "train/Choose latent sampling method/visible": true, "train/Choose latent sampling method/value": "once", "customscript/additional_networks.py/txt2img/Separate UNet/Text Encoder weights/visible": true, "customscript/additional_networks.py/txt2img/Separate UNet/Text Encoder weights/value": false, "txt2img/Weight 1/visible": true, "txt2img/Weight 1/value": 1.0, "txt2img/Weight 1/minimum": -1.0, "txt2img/Weight 1/maximum": 2.0, "txt2img/Weight 1/step": 0.05, "customscript/additional_networks.py/txt2img/UNet Weight 1/value": 1.0, "customscript/additional_networks.py/txt2img/UNet Weight 1/minimum": -1.0, "customscript/additional_networks.py/txt2img/UNet Weight 1/maximum": 2.0, "customscript/additional_networks.py/txt2img/UNet Weight 1/step": 0.05, "customscript/additional_networks.py/txt2img/TEnc Weight 1/value": 1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 1/minimum": -1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 1/maximum": 2.0, "customscript/additional_networks.py/txt2img/TEnc Weight 1/step": 0.05, "txt2img/Weight 2/visible": true, "txt2img/Weight 2/value": 1.0, "txt2img/Weight 2/minimum": -1.0, "txt2img/Weight 2/maximum": 2.0, "txt2img/Weight 2/step": 0.05, "customscript/additional_networks.py/txt2img/UNet Weight 2/value": 1.0, "customscript/additional_networks.py/txt2img/UNet Weight 2/minimum": -1.0, "customscript/additional_networks.py/txt2img/UNet Weight 2/maximum": 2.0, "customscript/additional_networks.py/txt2img/UNet Weight 2/step": 0.05, 
"customscript/additional_networks.py/txt2img/TEnc Weight 2/value": 1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 2/minimum": -1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 2/maximum": 2.0, "customscript/additional_networks.py/txt2img/TEnc Weight 2/step": 0.05, "txt2img/Weight 3/visible": true, "txt2img/Weight 3/value": 1.0, "txt2img/Weight 3/minimum": -1.0, "txt2img/Weight 3/maximum": 2.0, "txt2img/Weight 3/step": 0.05, "customscript/additional_networks.py/txt2img/UNet Weight 3/value": 1.0, "customscript/additional_networks.py/txt2img/UNet Weight 3/minimum": -1.0, "customscript/additional_networks.py/txt2img/UNet Weight 3/maximum": 2.0, "customscript/additional_networks.py/txt2img/UNet Weight 3/step": 0.05, "customscript/additional_networks.py/txt2img/TEnc Weight 3/value": 1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 3/minimum": -1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 3/maximum": 2.0, "customscript/additional_networks.py/txt2img/TEnc Weight 3/step": 0.05, "txt2img/Weight 4/visible": true, "txt2img/Weight 4/value": 1.0, "txt2img/Weight 4/minimum": -1.0, "txt2img/Weight 4/maximum": 2.0, "txt2img/Weight 4/step": 0.05, "customscript/additional_networks.py/txt2img/UNet Weight 4/value": 1.0, "customscript/additional_networks.py/txt2img/UNet Weight 4/minimum": -1.0, "customscript/additional_networks.py/txt2img/UNet Weight 4/maximum": 2.0, "customscript/additional_networks.py/txt2img/UNet Weight 4/step": 0.05, "customscript/additional_networks.py/txt2img/TEnc Weight 4/value": 1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 4/minimum": -1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 4/maximum": 2.0, "customscript/additional_networks.py/txt2img/TEnc Weight 4/step": 0.05, "txt2img/Weight 5/visible": true, "txt2img/Weight 5/value": 1.0, "txt2img/Weight 5/minimum": -1.0, "txt2img/Weight 5/maximum": 2.0, "txt2img/Weight 5/step": 0.05, 
"customscript/additional_networks.py/txt2img/UNet Weight 5/value": 1.0, "customscript/additional_networks.py/txt2img/UNet Weight 5/minimum": -1.0, "customscript/additional_networks.py/txt2img/UNet Weight 5/maximum": 2.0, "customscript/additional_networks.py/txt2img/UNet Weight 5/step": 0.05, "customscript/additional_networks.py/txt2img/TEnc Weight 5/value": 1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 5/minimum": -1.0, "customscript/additional_networks.py/txt2img/TEnc Weight 5/maximum": 2.0, "customscript/additional_networks.py/txt2img/TEnc Weight 5/step": 0.05, "customscript/additional_networks.py/img2img/Separate UNet/Text Encoder weights/visible": true, "customscript/additional_networks.py/img2img/Separate UNet/Text Encoder weights/value": false, "img2img/Weight 1/visible": true, "img2img/Weight 1/value": 1.0, "img2img/Weight 1/minimum": -1.0, "img2img/Weight 1/maximum": 2.0, "img2img/Weight 1/step": 0.05, "customscript/additional_networks.py/img2img/UNet Weight 1/value": 1.0, "customscript/additional_networks.py/img2img/UNet Weight 1/minimum": -1.0, "customscript/additional_networks.py/img2img/UNet Weight 1/maximum": 2.0, "customscript/additional_networks.py/img2img/UNet Weight 1/step": 0.05, "customscript/additional_networks.py/img2img/TEnc Weight 1/value": 1.0, "customscript/additional_networks.py/img2img/TEnc Weight 1/minimum": -1.0, "customscript/additional_networks.py/img2img/TEnc Weight 1/maximum": 2.0, "customscript/additional_networks.py/img2img/TEnc Weight 1/step": 0.05, "img2img/Weight 2/visible": true, "img2img/Weight 2/value": 1.0, "img2img/Weight 2/minimum": -1.0, "img2img/Weight 2/maximum": 2.0, "img2img/Weight 2/step": 0.05, "customscript/additional_networks.py/img2img/UNet Weight 2/value": 1.0, "customscript/additional_networks.py/img2img/UNet Weight 2/minimum": -1.0, "customscript/additional_networks.py/img2img/UNet Weight 2/maximum": 2.0, "customscript/additional_networks.py/img2img/UNet Weight 2/step": 0.05, 
"customscript/additional_networks.py/img2img/TEnc Weight 2/value": 1.0, "customscript/additional_networks.py/img2img/TEnc Weight 2/minimum": -1.0, "customscript/additional_networks.py/img2img/TEnc Weight 2/maximum": 2.0, "customscript/additional_networks.py/img2img/TEnc Weight 2/step": 0.05, "img2img/Weight 3/visible": true, "img2img/Weight 3/value": 1.0, "img2img/Weight 3/minimum": -1.0, "img2img/Weight 3/maximum": 2.0, "img2img/Weight 3/step": 0.05, "customscript/additional_networks.py/img2img/UNet Weight 3/value": 1.0, "customscript/additional_networks.py/img2img/UNet Weight 3/minimum": -1.0, "customscript/additional_networks.py/img2img/UNet Weight 3/maximum": 2.0, "customscript/additional_networks.py/img2img/UNet Weight 3/step": 0.05, "customscript/additional_networks.py/img2img/TEnc Weight 3/value": 1.0, "customscript/additional_networks.py/img2img/TEnc Weight 3/minimum": -1.0, "customscript/additional_networks.py/img2img/TEnc Weight 3/maximum": 2.0, "customscript/additional_networks.py/img2img/TEnc Weight 3/step": 0.05, "img2img/Weight 4/visible": true, "img2img/Weight 4/value": 1.0, "img2img/Weight 4/minimum": -1.0, "img2img/Weight 4/maximum": 2.0, "img2img/Weight 4/step": 0.05, "customscript/additional_networks.py/img2img/UNet Weight 4/value": 1.0, "customscript/additional_networks.py/img2img/UNet Weight 4/minimum": -1.0, "customscript/additional_networks.py/img2img/UNet Weight 4/maximum": 2.0, "customscript/additional_networks.py/img2img/UNet Weight 4/step": 0.05, "customscript/additional_networks.py/img2img/TEnc Weight 4/value": 1.0, "customscript/additional_networks.py/img2img/TEnc Weight 4/minimum": -1.0, "customscript/additional_networks.py/img2img/TEnc Weight 4/maximum": 2.0, "customscript/additional_networks.py/img2img/TEnc Weight 4/step": 0.05, "img2img/Weight 5/visible": true, "img2img/Weight 5/value": 1.0, "img2img/Weight 5/minimum": -1.0, "img2img/Weight 5/maximum": 2.0, "img2img/Weight 5/step": 0.05, 
"customscript/additional_networks.py/img2img/UNet Weight 5/value": 1.0, "customscript/additional_networks.py/img2img/UNet Weight 5/minimum": -1.0, "customscript/additional_networks.py/img2img/UNet Weight 5/maximum": 2.0, "customscript/additional_networks.py/img2img/UNet Weight 5/step": 0.05, "customscript/additional_networks.py/img2img/TEnc Weight 5/value": 1.0, "customscript/additional_networks.py/img2img/TEnc Weight 5/minimum": -1.0, "customscript/additional_networks.py/img2img/TEnc Weight 5/maximum": 2.0, "customscript/additional_networks.py/img2img/TEnc Weight 5/step": 0.05 } ================================================ FILE: webui-macos-env.sh ================================================ #!/bin/bash #################################################################### # macOS defaults # # Please modify webui-user.sh to change these instead of this file # #################################################################### if [[ -x "$(command -v python3.10)" ]] then python_cmd="python3.10" fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 #################################################################### ================================================ FILE: webui-user.bat ================================================ @echo off set PYTHON= set GIT= set VENV_DIR= set COMMANDLINE_ARGS= call webui.bat ================================================ FILE: webui-user.sh ================================================ #!/bin/bash ######################################################### # Uncomment and change the variables below to your need:# ######################################################### # Install directory without 
trailing slash #install_dir="/home/$(whoami)" # Name of the subdirectory #clone_dir="stable-diffusion-webui" # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" #export COMMANDLINE_ARGS="" # python3 executable #python_cmd="python3" # git executable #export GIT="git" # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) #venv_dir="venv" # script to launch to start the app #export LAUNCH_SCRIPT="launch.py" # install command for torch #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113" # Requirements file to use for stable-diffusion-webui #export REQS_FILE="requirements_versions.txt" # Fixed git repos #export K_DIFFUSION_PACKAGE="" #export GFPGAN_PACKAGE="" # Fixed git commits #export STABLE_DIFFUSION_COMMIT_HASH="" #export TAMING_TRANSFORMERS_COMMIT_HASH="" #export CODEFORMER_COMMIT_HASH="" #export BLIP_COMMIT_HASH="" # Uncomment to enable accelerated launch #export ACCELERATE="True" ########################################### ================================================ FILE: webui.bat ================================================ @echo off if not defined PYTHON (set PYTHON=python) if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") set ERROR_REPORTING=FALSE mkdir tmp 2>NUL %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt if %ERRORLEVEL% == 0 goto :check_pip echo Couldn't launch python goto :show_stdout_stderr :check_pip %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt if %ERRORLEVEL% == 0 goto :start_venv if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt if %ERRORLEVEL% == 0 goto :start_venv echo Couldn't install pip goto :show_stdout_stderr :start_venv if ["%VENV_DIR%"] == ["-"] goto :skip_venv if ["%SKIP_VENV%"] == ["1"] goto :skip_venv dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt if %ERRORLEVEL% == 0 
goto :activate_venv for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt if %ERRORLEVEL% == 0 goto :activate_venv echo Unable to create venv in directory "%VENV_DIR%" goto :show_stdout_stderr :activate_venv set PYTHON="%VENV_DIR%\Scripts\Python.exe" echo venv %PYTHON% :skip_venv if [%ACCELERATE%] == ["True"] goto :accelerate goto :launch :accelerate echo Checking for accelerate set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe" if EXIST %ACCELERATE% goto :accelerate_launch :launch %PYTHON% launch.py %* pause exit /b :accelerate_launch echo Accelerating %ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py pause exit /b :show_stdout_stderr echo. echo exit code: %errorlevel% for /f %%i in ("tmp\stdout.txt") do set size=%%~zi if %size% equ 0 goto :show_stderr echo. echo stdout: type tmp\stdout.txt :show_stderr for /f %%i in ("tmp\stderr.txt") do set size=%%~zi if %size% equ 0 goto :show_stderr echo. echo stderr: type tmp\stderr.txt :endofscript echo. echo Launch unsuccessful. Exiting. 
pause ================================================ FILE: webui.py ================================================ import os import sys import time import importlib import signal import re from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware from packaging import version import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) from modules import import_hook, errors, extra_networks, ui_extra_networks_checkpoints from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call import torch # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: torch.__long_version__ = torch.__version__ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks import modules.codeformer_model as codeformer import modules.face_restoration import modules.gfpgan_model as gfpgan import modules.img2img import modules.lowvram import modules.paths import modules.scripts import modules.sd_hijack import modules.sd_models import modules.sd_vae import modules.txt2img import modules.script_callbacks import modules.textual_inversion.textual_inversion import modules.progress import modules.ui from modules import modelloader from modules.shared import cmd_opts import modules.hypernetworks.hypernetwork if cmd_opts.server_name: server_name = cmd_opts.server_name else: server_name = "0.0.0.0" if cmd_opts.listen else None def check_versions(): if shared.cmd_opts.skip_version_check: return expected_torch_version = "1.13.1" if version.parse(torch.__version__) < 
version.parse(expected_torch_version): errors.print_error_explanation(f""" You are running torch {torch.__version__}. The program is tested to work with torch {expected_torch_version}. To reinstall the desired version, run with commandline flag --reinstall-torch. Beware that this will cause a lot of large files to be downloaded, as well as there are reports of issues with training tab on the latest version. Use --skip-version-check commandline argument to disable this check. """.strip()) expected_xformers_version = "0.0.16rc425" if shared.xformers_available: import xformers if version.parse(xformers.__version__) < version.parse(expected_xformers_version): errors.print_error_explanation(f""" You are running xformers {xformers.__version__}. The program is tested to work with xformers {expected_xformers_version}. To reinstall the desired version, run with commandline flag --reinstall-xformers. Use --skip-version-check commandline argument to disable this check. """.strip()) def initialize(): check_versions() extensions.list_extensions() localization.list_localizations(cmd_opts.localizations_dir) if cmd_opts.ui_debug_mode: shared.sd_upscalers = upscaler.UpscalerLanczos().scalers modules.scripts.load_scripts() return modelloader.cleanup_models() modules.sd_models.setup_model() codeformer.setup_model(cmd_opts.codeformer_models_path) gfpgan.setup_model(cmd_opts.gfpgan_models_path) modelloader.list_builtin_upscalers() modules.scripts.load_scripts() modelloader.load_upscalers() modules.sd_vae.refresh_vae_list() modules.textual_inversion.textual_inversion.list_textual_inversion_templates() try: modules.sd_models.load_model() except Exception as e: errors.display(e, "loading stable diffusion model") print("", file=sys.stderr) print("Stable diffusion model failed to load, exiting", file=sys.stderr) exit(1) shared.opts.data["sd_model_checkpoint"] = shared.sd_model.sd_checkpoint_info.title shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: 
modules.sd_models.reload_model_weights())) shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) shared.reload_hypernetworks() ui_extra_networks.intialize() ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion()) ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks()) ui_extra_networks.register_page(ui_extra_networks_checkpoints.ExtraNetworksPageCheckpoints()) extra_networks.initialize() extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet()) if cmd_opts.tls_keyfile is not None and cmd_opts.tls_keyfile is not None: try: if not os.path.exists(cmd_opts.tls_keyfile): print("Invalid path to TLS keyfile given") if not os.path.exists(cmd_opts.tls_certfile): print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'") except TypeError: cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None print("TLS setup invalid, running webui without TLS") else: print("Running with TLS") # make the program just exit at ctrl+c without waiting for anything def sigint_handler(sig, frame): print(f'Interrupted with signal {sig} in {frame}') os._exit(0) signal.signal(signal.SIGINT, sigint_handler) def setup_cors(app): if cmd_opts.cors_allow_origins and cmd_opts.cors_allow_origins_regex: app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*']) elif cmd_opts.cors_allow_origins: app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*']) elif cmd_opts.cors_allow_origins_regex: 
app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*']) def create_api(app): from modules.api.api import Api api = Api(app, queue_lock) return api def wait_on_server(demo=None): while 1: time.sleep(0.5) if shared.state.need_restart: shared.state.need_restart = False time.sleep(0.5) demo.close() time.sleep(0.5) break def api_only(): initialize() app = FastAPI() setup_cors(app) app.add_middleware(GZipMiddleware, minimum_size=1000) api = create_api(app) modules.script_callbacks.app_started_callback(None, app) api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) def webui(): launch_api = cmd_opts.api initialize() while 1: if shared.opts.clean_temp_dir_at_start: ui_tempdir.cleanup_tmpdr() modules.script_callbacks.before_ui_callback() shared.demo = modules.ui.create_ui() if cmd_opts.gradio_queue: shared.demo.queue(64) gradio_auth_creds = [] if cmd_opts.gradio_auth: gradio_auth_creds += cmd_opts.gradio_auth.strip('"').replace('\n', '').split(',') if cmd_opts.gradio_auth_path: with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file: for line in file.readlines(): gradio_auth_creds += [x.strip() for x in line.split(',')] app, local_url, share_url = shared.demo.launch( share=cmd_opts.share, server_name=server_name, server_port=cmd_opts.port, ssl_keyfile=cmd_opts.tls_keyfile, ssl_certfile=cmd_opts.tls_certfile, debug=cmd_opts.gradio_debug, auth=[tuple(cred.split(':')) for cred in gradio_auth_creds] if gradio_auth_creds else None, inbrowser=cmd_opts.autolaunch, prevent_thread_lock=True ) # after initial launch, disable --autolaunch for subsequent restarts cmd_opts.autolaunch = False # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for # an attacker to trick the user into opening a malicious HTML page, which makes a request to the # running web ui and do whatever the 
attacker wants, including installing an extension and # running its code. We disable this here. Suggested by RyotaK. app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware'] setup_cors(app) app.add_middleware(GZipMiddleware, minimum_size=1000) modules.progress.setup_progress_api(app) if launch_api: create_api(app) ui_extra_networks.add_pages_to_demo(app) modules.script_callbacks.app_started_callback(shared.demo, app) wait_on_server(shared.demo) print('Restarting UI...') sd_samplers.set_samplers() modules.script_callbacks.script_unloaded_callback() extensions.list_extensions() localization.list_localizations(cmd_opts.localizations_dir) modelloader.forbid_loaded_nonbuiltin_upscalers() modules.scripts.reload_scripts() modules.script_callbacks.model_loaded_callback(shared.sd_model) modelloader.load_upscalers() for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: importlib.reload(module) modules.sd_models.list_models() shared.reload_hypernetworks() ui_extra_networks.intialize() ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion()) ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks()) ui_extra_networks.register_page(ui_extra_networks_checkpoints.ExtraNetworksPageCheckpoints()) extra_networks.initialize() extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet()) if __name__ == "__main__": if cmd_opts.nowebui: api_only() else: webui() ================================================ FILE: webui.sh ================================================ #!/usr/bin/env bash ################################################# # Please do not make any changes to this file, # # change the variables in webui-user.sh instead # ################################################# # If run from macOS, load defaults from webui-macos-env.sh if [[ "$OSTYPE" == "darwin"* ]]; then if [[ -f 
webui-macos-env.sh ]] then source ./webui-macos-env.sh fi fi # Read variables from webui-user.sh # shellcheck source=/dev/null if [[ -f webui-user.sh ]] then source ./webui-user.sh fi # Set defaults # Install directory without trailing slash if [[ -z "${install_dir}" ]] then install_dir="/home/$(whoami)" fi # Name of the subdirectory (defaults to stable-diffusion-webui) if [[ -z "${clone_dir}" ]] then clone_dir="stable-diffusion-webui" fi # python3 executable if [[ -z "${python_cmd}" ]] then python_cmd="python3" fi # git executable if [[ -z "${GIT}" ]] then export GIT="git" fi # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) if [[ -z "${venv_dir}" ]] then venv_dir="venv" fi if [[ -z "${LAUNCH_SCRIPT}" ]] then LAUNCH_SCRIPT="launch.py" fi # this script cannot be run as root by default can_run_as_root=0 # read any command line flags to the webui.sh script while getopts "f" flag > /dev/null 2>&1 do case ${flag} in f) can_run_as_root=1;; *) break;; esac done # Disable sentry logging export ERROR_REPORTING=FALSE # Do not reinstall existing pip packages on Debian/Ubuntu export PIP_IGNORE_INSTALLED=0 # Pretty print delimiter="################################################################" printf "\n%s\n" "${delimiter}" printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n" printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m" printf "\n%s\n" "${delimiter}" # Do not run as root if [[ $(id -u) -eq 0 && can_run_as_root -eq 0 ]] then printf "\n%s\n" "${delimiter}" printf "\e[1m\e[31mERROR: This script must not be launched as root, aborting...\e[0m" printf "\n%s\n" "${delimiter}" exit 1 else printf "\n%s\n" "${delimiter}" printf "Running on \e[1m\e[32m%s\e[0m user" "$(whoami)" printf "\n%s\n" "${delimiter}" fi if [[ -d .git ]] then printf "\n%s\n" "${delimiter}" printf "Repo already cloned, using it as install directory" printf "\n%s\n" "${delimiter}" install_dir="${PWD}/../" clone_dir="${PWD##*/}" fi # Check prerequisites 
gpu_info=$(lspci 2>/dev/null | grep VGA) case "$gpu_info" in *"Navi 1"*|*"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0 ;; *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0 printf "\n%s\n" "${delimiter}" printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half" printf "\n%s\n" "${delimiter}" ;; *) ;; esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" do if ! hash "${preq}" &>/dev/null then printf "\n%s\n" "${delimiter}" printf "\e[1m\e[31mERROR: %s is not installed, aborting...\e[0m" "${preq}" printf "\n%s\n" "${delimiter}" exit 1 fi done if ! "${python_cmd}" -c "import venv" &>/dev/null then printf "\n%s\n" "${delimiter}" printf "\e[1m\e[31mERROR: python3-venv is not installed, aborting...\e[0m" printf "\n%s\n" "${delimiter}" exit 1 fi cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; } if [[ -d "${clone_dir}" ]] then cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } else printf "\n%s\n" "${delimiter}" printf "Clone stable-diffusion-webui" printf "\n%s\n" "${delimiter}" "${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}" cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } fi printf "\n%s\n" "${delimiter}" printf "Create and activate python venv" printf "\n%s\n" "${delimiter}" cd "${install_dir}"/"${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } if [[ ! 
-d "${venv_dir}" ]] then "${python_cmd}" -m venv "${venv_dir}" first_launch=1 fi # shellcheck source=/dev/null if [[ -f "${venv_dir}"/bin/activate ]] then source "${venv_dir}"/bin/activate else printf "\n%s\n" "${delimiter}" printf "\e[1m\e[31mERROR: Cannot activate python venv, aborting...\e[0m" printf "\n%s\n" "${delimiter}" exit 1 fi if [[ ! -z "${ACCELERATE}" ]] && [ ${ACCELERATE}="True" ] && [ -x "$(command -v accelerate)" ] then printf "\n%s\n" "${delimiter}" printf "Accelerating launch.py..." printf "\n%s\n" "${delimiter}" exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" else printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." printf "\n%s\n" "${delimiter}" exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" fi