SYMBOL INDEX (72 symbols across 7 files) FILE: attention_processor.py class FluxAttnProcessor2_0 (line 20) | class FluxAttnProcessor2_0: method __init__ (line 23) | def __init__(self, distill=False): method __call__ (line 28) | def __call__( function init_local_downsample_mask_flex (line 117) | def init_local_downsample_mask_flex(height, width, text_length, window_s... class LocalDownsampleFlexAttnProcessor (line 145) | class LocalDownsampleFlexAttnProcessor(nn.Module): method __init__ (line 147) | def __init__(self, down_factor=4, distill=False): method __call__ (line 156) | def __call__( function init_local_mask_flex (line 252) | def init_local_mask_flex(height, width, text_length, window_size, device): class LocalFlexAttnProcessor (line 270) | class LocalFlexAttnProcessor: method __init__ (line 273) | def __init__(self, distill=False): method __call__ (line 279) | def __call__( FILE: cache_latent_codes.py function parse_args (line 12) | def parse_args(input_args=None): function main (line 89) | def main(args): FILE: cache_prompt_embeds.py function parse_args (line 11) | def parse_args(input_args=None): function tokenize_prompt (line 88) | def tokenize_prompt(tokenizer, prompt, max_sequence_length): function _encode_prompt_with_t5 (line 102) | def _encode_prompt_with_t5( function _encode_prompt_with_clip (line 143) | def _encode_prompt_with_clip( function encode_prompt (line 183) | def encode_prompt( function main (line 216) | def main(args): FILE: dataset.py function image_resize (line 13) | def image_resize(img, max_size=512): function c_crop (line 26) | def c_crop(image): function crop_to_aspect_ratio (line 36) | def crop_to_aspect_ratio(image, ratio="16:9"): class CustomImageDataset (line 61) | class CustomImageDataset(Dataset): method __init__ (line 62) | def __init__(self, img_dir, img_size=512, caption_type='json', method __len__ (line 72) | def __len__(self): method __getitem__ (line 75) | def __getitem__(self, idx): function loader (line 109) | def loader(train_batch_size, num_workers, **args):
FILE: distill.py function load_text_encoders (line 57) | def load_text_encoders(class_one, class_two): function log_validation (line 67) | def log_validation( function import_model_class_from_model_name_or_path (line 112) | def import_model_class_from_model_name_or_path( function parse_args (line 131) | def parse_args(input_args=None): function tokenize_prompt (line 472) | def tokenize_prompt(tokenizer, prompt, max_sequence_length): function _encode_prompt_with_t5 (line 486) | def _encode_prompt_with_t5( function _encode_prompt_with_clip (line 527) | def _encode_prompt_with_clip( function encode_prompt (line 567) | def encode_prompt( function main (line 604) | def main(args): FILE: pipeline_flux_img2img.py function calculate_shift (line 75) | def calculate_shift( function retrieve_latents (line 89) | def retrieve_latents( function retrieve_timesteps (line 103) | def retrieve_timesteps( class FluxImg2ImgPipeline (line 162) | class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin): method __init__ (line 193) | def __init__( method _get_t5_prompt_embeds (line 224) | def _get_t5_prompt_embeds( method _get_clip_prompt_embeds (line 274) | def _get_clip_prompt_embeds( method encode_prompt (line 319) | def encode_prompt( method _encode_vae_image (line 399) | def _encode_vae_image(self, image: torch.Tensor, generator: torch.Gene... method get_timesteps (line 414) | def get_timesteps(self, num_inference_steps, strength, device): method check_inputs (line 425) | def check_inputs( method _prepare_latent_image_ids (line 479) | def _prepare_latent_image_ids(batch_size, height, width, device, dtype): method _pack_latents (line 494) | def _pack_latents(latents, batch_size, num_channels_latents, height, w...
method _unpack_latents (line 503) | def _unpack_latents(latents, height, width, vae_scale_factor): method prepare_latents (line 516) | def prepare_latents( method guidance_scale (line 564) | def guidance_scale(self): method joint_attention_kwargs (line 568) | def joint_attention_kwargs(self): method num_timesteps (line 572) | def num_timesteps(self): method interrupt (line 576) | def interrupt(self): method __call__ (line 581) | def __call__( FILE: transformer_flux.py class FluxSingleTransformerBlock (line 43) | class FluxSingleTransformerBlock(nn.Module): method __init__ (line 57) | def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_r... method forward (line 80) | def forward( class FluxTransformerBlock (line 110) | class FluxTransformerBlock(nn.Module): method __init__ (line 124) | def __init__(self, dim, num_attention_heads, attention_head_dim, qk_no... method forward (line 161) | def forward( class FluxPosEmbed (line 213) | class FluxPosEmbed(nn.Module): method __init__ (line 215) | def __init__(self, theta: int, axes_dim: List[int]): method forward (line 220) | def forward(self, ids: torch.Tensor, ntk_factor=1) -> torch.Tensor: class FluxTransformer2DModel (line 239) | class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, ... method __init__ (line 261) | def __init__( method attn_processors (line 319) | def attn_processors(self) -> Dict[str, AttentionProcessor]: method set_attn_processor (line 343) | def set_attn_processor(self, processor: Union[AttentionProcessor, Dict... method unfuse_qkv_projections (line 378) | def unfuse_qkv_projections(self): method _set_gradient_checkpointing (line 391) | def _set_gradient_checkpointing(self, module, value=False): method forward (line 395) | def forward(