Merge branch 'master' into hn-activation
commit 649d79a8ec
415 localizations/fr-FR.json Normal file
@@ -0,0 +1,415 @@
{
    "⤡": "⤡",
    "⊞": "⊞",
    "×": "×",
    "❮": "❮",
    "❯": "❯",
    "Loading...": "Chargement...",
    "view": "vue",
    "api": "api",
    "•": "•",
    "built with gradio": "Construit avec Gradio",
    "Stable Diffusion checkpoint": "checkpoint Stable Diffusion",
    "txt2img": "txt2img",
    "img2img": "img2img",
    "Extras": "Extras",
    "PNG Info": "Infos PNG",
    "History": "Historique",
    "Checkpoint Merger": "Fusion de checkpoints",
    "Train": "Entrainer",
    "Settings": "Paramètres",
    "Prompt": "Requête",
    "Negative prompt": "Requête négative",
    "Run": "Lancer",
    "Skip": "Passer",
    "Interrupt": "Interrompre",
    "Generate": "Générer",
    "Style 1": "Style 1",
    "Style 2": "Style 2",
    "Label": "Etiquette",
    "File": "Fichier",
    "Drop File Here": "Déposer votre fichier ici",
    "-": "-",
    "or": "ou",
    "Click to Upload": "Cliquer pour uploader",
    "Image": "Image",
    "Check progress": "Voir l'avancement",
    "Check progress (first)": "Voir l'avancement (1er)",
    "Sampling Steps": "Étapes d'échantillonnage",
    "Sampling method": "Méthode d'échantillonnage",
    "Euler a": "Euler a",
    "Euler": "Euler",
    "LMS": "LMS",
    "Heun": "Heun",
    "DPM2": "DPM2",
    "DPM2 a": "DPM2 a",
    "DPM fast": "DPM fast",
    "DPM adaptive": "DPM adaptive",
    "LMS Karras": "LMS Karras",
    "DPM2 Karras": "DPM2 Karras",
    "DPM2 a Karras": "DPM2 a Karras",
    "DDIM": "DDIM",
    "PLMS": "PLMS",
    "Width": "Largeur",
    "Height": "Hauteur",
    "Restore faces": "Restaurer les visages",
    "Tiling": "Mode Tuile",
    "Highres. fix": "Correction haute résolution",
    "Firstpass width": "Largeur première passe",
    "Firstpass height": "Hauteur première passe",
    "Denoising strength": "Puissance de réduction du bruit",
    "Batch count": "Nombre de lots",
    "Batch size": "Taille de lots",
    "CFG Scale": "Echelle CFG",
    "Seed": "Valeur aléatoire",
    "Extra": "Extra",
    "Variation seed": "Variation de la valeur aléatoire",
    "Variation strength": "Puissance de variation",
    "Resize seed from width": "Largeur de redimensionnement de la valeur aléatoire",
    "Resize seed from height": "Hauteur de redimensionnement de la valeur aléatoire",
    "Script": "Script",
    "None": "Aucun",
    "Prompt matrix": "Matrice de requête",
    "Prompts from file or textbox": "Requêtes depuis un fichier ou une boite de dialogue",
    "X/Y plot": "graphe X/Y",
    "Put variable parts at start of prompt": "Mettre les mots clés variables au début de la requête",
    "Show Textbox": "Afficher le champ texte",
    "File with inputs": "Fichier d'entrée",
    "Prompts": "Requêtes",
    "X type": "Paramètre axe X",
    "Nothing": "Rien",
    "Var. seed": "Valeur aléatoire variable",
    "Var. strength": "Puissance variable",
    "Steps": "Étapes",
    "Prompt S/R": "Cherche et remplace dans la requête",
    "Prompt order": "Ordre de la requête",
    "Sampler": "Echantillonneur",
    "Checkpoint name": "Nom du checkpoint",
    "Hypernetwork": "Hypernetwork",
    "Hypernet str.": "Force de l'Hypernetwork",
    "Sigma Churn": "Sigma Churn",
    "Sigma min": "Sigma min.",
    "Sigma max": "Sigma max.",
    "Sigma noise": "Bruit Sigma",
    "Eta": "Eta",
    "Clip skip": "Passer Clip",
    "Denoising": "Réduction du bruit",
    "X values": "Valeurs X",
    "Y type": "Paramètre axe Y",
    "Y values": "Valeurs Y",
    "Draw legend": "Afficher la légende",
    "Include Separate Images": "Inclure les images séparées",
    "Keep -1 for seeds": "Conserver -1 pour la valeur aléatoire",
    "Drop Image Here": "Déposer l'image ici",
    "Save": "Enregistrer",
    "Send to img2img": "Envoyer vers img2img",
    "Send to inpaint": "Envoyer vers inpaint",
    "Send to extras": "Envoyer vers extras",
    "Make Zip when Save?": "Créer un zip lors de l'enregistrement ?",
    "Textbox": "Champ texte",
    "Interrogate\nCLIP": "Interroger\nCLIP",
    "Interrogate\nDeepBooru": "Interroger\nDeepBooru",
    "Inpaint": "Inpaint",
    "Batch img2img": "Lot img2img",
    "Image for img2img": "Image pour img2img",
    "Image for inpainting with mask": "Image pour inpainting avec masque",
    "Mask": "Masque",
    "Mask blur": "Flou masque",
    "Mask mode": "Mode masque",
    "Draw mask": "Dessiner masque",
    "Upload mask": "Uploader masque",
    "Masking mode": "Mode de masquage",
    "Inpaint masked": "Inpaint masqué",
    "Inpaint not masked": "Inpaint non masqué",
    "Masked content": "Contenu masqué",
    "fill": "remplir",
    "original": "original",
    "latent noise": "bruit latent",
    "latent nothing": "latent vide",
    "Inpaint at full resolution": "Inpaint en pleine résolution",
    "Inpaint at full resolution padding, pixels": "Padding de l'inpaint en pleine résolution, en pixels",
    "Process images in a directory on the same machine where the server is running.": "Traite les images dans un dossier sur la même machine où le serveur tourne.",
    "Use an empty output directory to save pictures normally instead of writing to the output directory.": "Utiliser un dossier de sortie vide pour enregistrer les images normalement plutôt que d'écrire dans le dossier de sortie.",
    "Input directory": "Dossier d'entrée",
    "Output directory": "Dossier de sortie",
    "Resize mode": "Mode redimensionnement",
    "Just resize": "Redimensionner uniquement",
    "Crop and resize": "Recadrer et redimensionner",
    "Resize and fill": "Redimensionner et remplir",
    "img2img alternative test": "Test alternatif img2img",
    "Loopback": "Bouclage",
    "Outpainting mk2": "Outpainting v2",
    "Poor man's outpainting": "Outpainting du pauvre",
    "SD upscale": "Agrandissement SD",
    "should be 2 or lower.": "doit être inférieur ou égal à 2.",
    "Override `Sampling method` to Euler?(this method is built for it)": "Forcer `Méthode d'échantillonnage` à Euler ? (cette méthode est dédiée à cela)",
    "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Forcer la `requête` au contenu de la `requête d'origine` ? (de même pour la `requête négative`)",
    "Original prompt": "Requête d'origine",
    "Original negative prompt": "Requête négative d'origine",
    "Override `Sampling Steps` to the same value as `Decode steps`?": "Forcer la valeur d'`Étapes d'échantillonnage` à la même valeur qu'`Étapes de décodage` ?",
    "Decode steps": "Étapes de décodage",
    "Override `Denoising strength` to 1?": "Forcer `Puissance de réduction du bruit` à 1 ?",
    "Decode CFG scale": "Echelle CFG de décodage",
    "Randomness": "Aléatoire",
    "Sigma adjustment for finding noise for image": "Ajustement Sigma lors de la recherche du bruit dans l'image",
    "Loops": "Boucles",
    "Denoising strength change factor": "Facteur de changement de la puissance de réduction du bruit",
    "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Paramètres recommandés : Étapes d'échantillonnage : 80-100, Echantillonneur : Euler a, Puissance de réduction du bruit : 0.8",
    "Pixels to expand": "Pixels à étendre",
    "Outpainting direction": "Direction de l'outpainting",
    "left": "gauche",
    "right": "droite",
    "up": "haut",
    "down": "bas",
    "Fall-off exponent (lower=higher detail)": "Exposant de diminution (plus petit = plus de détails)",
    "Color variation": "Variation de couleur",
    "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Agrandira l'image à deux fois sa taille; utilisez les glissières largeur et hauteur afin de choisir la taille de tuile",
    "Tile overlap": "Chevauchement de tuile",
    "Upscaler": "Agrandisseur",
    "Lanczos": "Lanczos",
    "LDSR": "LDSR",
    "BSRGAN 4x": "BSRGAN 4x",
    "ESRGAN_4x": "ESRGAN_4x",
    "R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
    "ScuNET GAN": "ScuNET GAN",
    "ScuNET PSNR": "ScuNET PSNR",
    "SwinIR 4x": "SwinIR 4x",
    "Single Image": "Image unique",
    "Batch Process": "Traitement par lot",
    "Batch from Directory": "Lot depuis un dossier",
    "Source": "Source",
    "Show result images": "Afficher les images résultantes",
    "Scale by": "Mise à l'échelle de",
    "Scale to": "Mise à l'échelle à",
    "Resize": "Redimensionner",
    "Crop to fit": "Recadrer à la taille",
    "Upscaler 2": "Agrandisseur 2",
    "Upscaler 2 visibility": "Visibilité de l'agrandisseur 2",
    "GFPGAN visibility": "Visibilité GFPGAN",
    "CodeFormer visibility": "Visibilité CodeFormer",
    "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Poids CodeFormer (0 = effet maximum, 1 = effet minimum)",
    "Open output directory": "Ouvrir le dossier de sortie",
    "Send to txt2img": "Envoyer vers txt2img",
    "txt2img history": "historique txt2img",
    "img2img history": "historique img2img",
    "extras history": "historique extras",
    "Renew Page": "Rafraichir la page",
    "First Page": "Première page",
    "Prev Page": "Page précédente",
    "Page Index": "Index des pages",
    "Next Page": "Page suivante",
    "End Page": "Page de fin",
    "number of images to delete consecutively next": "nombre d'images à supprimer consécutivement ensuite",
    "Delete": "Supprimer",
    "Generate Info": "Générer les informations",
    "File Name": "Nom de fichier",
    "set_index": "set_index",
    "A merger of the two checkpoints will be generated in your": "Une fusion des deux checkpoints sera générée dans votre",
    "checkpoint": "checkpoint",
    "directory.": "dossier.",
    "Primary model (A)": "Modèle primaire (A)",
    "Secondary model (B)": "Modèle secondaire (B)",
    "Tertiary model (C)": "Modèle tertiaire (C)",
    "Custom Name (Optional)": "Nom personnalisé (Optionnel)",
    "Multiplier (M) - set to 0 to get model A": "Multiplicateur (M) - utiliser 0 pour le modèle A",
    "Interpolation Method": "Méthode d'interpolation",
    "Weighted sum": "Somme pondérée",
    "Add difference": "Ajouter différence",
    "Save as float16": "Enregistrer en tant que float16",
    "See": "Voir",
    "wiki": "wiki",
    "for detailed explanation.": "pour une explication détaillée.",
    "Create embedding": "Créer un embedding",
    "Create hypernetwork": "Créer un hypernetwork",
    "Preprocess images": "Pré-traiter les images",
    "Name": "Nom",
    "Initialization text": "Texte d'initialisation",
    "Number of vectors per token": "Nombre de vecteurs par jeton",
    "Modules": "Modules",
    "Source directory": "Dossier source",
    "Destination directory": "Dossier destination",
    "Create flipped copies": "Créer des copies en miroir",
    "Split oversized images into two": "Couper les images trop grandes en deux",
    "Use BLIP for caption": "Utiliser BLIP pour les descriptions",
    "Use deepbooru for caption": "Utiliser deepbooru pour les descriptions",
    "Preprocess": "Pré-traiter",
    "Train an embedding; must specify a directory with a set of 1:1 ratio images": "Entrainer un embedding ; spécifiez un dossier contenant un ensemble d'images avec un ratio de 1:1",
    "Embedding": "Embedding",
    "Learning rate": "Vitesse d'apprentissage",
    "Dataset directory": "Dossier des images d'entrée",
    "Log directory": "Dossier de journalisation",
    "Prompt template file": "Fichier modèle de requêtes",
    "Max steps": "Étapes max.",
    "Save an image to log directory every N steps, 0 to disable": "Enregistrer une image dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
    "Save a copy of embedding to log directory every N steps, 0 to disable": "Enregistrer une copie de l'embedding dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
    "Save images with embedding in PNG chunks": "Sauvegarder les images incluant l'embedding dans leurs blocs PNG",
    "Read parameters (prompt, etc...) from txt2img tab when making previews": "Lire les paramètres (requête, etc.) depuis l'onglet txt2img lors de la génération des previews",
    "Train Hypernetwork": "Entrainer un Hypernetwork",
    "Train Embedding": "Entrainer un Embedding",
    "Apply settings": "Appliquer les paramètres",
    "Saving images/grids": "Enregistrer les images/grilles",
    "Always save all generated images": "Toujours enregistrer toutes les images",
    "File format for images": "Format de fichier pour les images",
    "Images filename pattern": "Motif pour le nom de fichier des images",
    "Always save all generated image grids": "Toujours enregistrer toutes les grilles d'images générées",
    "File format for grids": "Format de fichier pour les grilles",
    "Add extended info (seed, prompt) to filename when saving grid": "Ajouter les informations étendues (valeur aléatoire, requête) aux noms de fichiers lors de l'enregistrement d'une grille",
    "Do not save grids consisting of one picture": "Ne pas enregistrer les grilles contenant une seule image",
    "Prevent empty spots in grid (when set to autodetect)": "Eviter les vides dans la grille (quand autodétection est choisie)",
    "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Nombre de lignes de la grille; utilisez -1 pour autodétection et 0 pour qu'il soit égal à la taille du lot",
    "Save text information about generation parameters as chunks to png files": "Enregistrer les informations textuelles des paramètres de génération en tant que blocs dans les fichiers PNG",
    "Create a text file next to every image with generation parameters.": "Créer un fichier texte contenant les paramètres de génération à côté de chaque image.",
    "Save a copy of image before doing face restoration.": "Enregistrer une copie de l'image avant de lancer la restauration de visage.",
    "Quality for saved jpeg images": "Qualité pour les images jpeg enregistrées",
    "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Si l'image PNG est plus grande que 4MB ou l'une de ses dimensions supérieure à 4000, réduire sa taille et enregistrer une copie en JPG",
    "Use original name for output filename during batch process in extras tab": "Utiliser le nom de fichier d'origine pour les fichiers de sortie durant le traitement par lot dans l'onglet Extras",
    "When using 'Save' button, only save a single selected image": "A l'utilisation du bouton `Enregistrer`, n'enregistrer que l'image sélectionnée",
    "Do not add watermark to images": "Ne pas ajouter de filigrane aux images",
    "Paths for saving": "Chemins pour l'enregistrement",
    "Output directory for images; if empty, defaults to three directories below": "Dossier de sortie pour les images; si non spécifié, les trois dossiers ci-dessous sont utilisés par défaut",
    "Output directory for txt2img images": "Dossier de sortie pour les images txt2img",
    "Output directory for img2img images": "Dossier de sortie pour les images img2img",
    "Output directory for images from extras tab": "Dossier de sortie pour les images de l'onglet Extras",
    "Output directory for grids; if empty, defaults to two directories below": "Dossier de sortie pour les grilles; si non spécifié, les deux dossiers ci-dessous sont utilisés par défaut",
    "Output directory for txt2img grids": "Dossier de sortie pour les grilles txt2img",
    "Output directory for img2img grids": "Dossier de sortie pour les grilles img2img",
    "Directory for saving images using the Save button": "Dossier de sauvegarde des images pour le bouton `Enregistrer`",
    "Saving to a directory": "Enregistrer dans un dossier",
    "Save images to a subdirectory": "Enregistrer les images dans un sous-dossier",
    "Save grids to a subdirectory": "Enregistrer les grilles dans un sous-dossier",
    "When using \"Save\" button, save images to a subdirectory": "Lors de l'utilisation du bouton \"Enregistrer\", sauvegarder les images dans un sous-dossier",
    "Directory name pattern": "Motif pour le nom des dossiers",
    "Max prompt words for [prompt_words] pattern": "Nombre maximum de mots pour le motif [prompt_words]",
    "Upscaling": "Agrandissement",
    "Tile size for ESRGAN upscalers. 0 = no tiling.": "Taille des tuiles pour les agrandisseurs ESRGAN. 0 = mode tuile désactivé.",
    "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Chevauchement des tuiles, en pixels pour les agrandisseurs ESRGAN. Valeur faible = couture visible.",
    "Tile size for all SwinIR.": "Taille de la tuile pour tous les agrandisseurs SwinIR.",
    "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Chevauchement de tuile, en pixels pour SwinIR. Valeur faible = couture visible.",
    "LDSR processing steps. Lower = faster": "Étapes de traitement LDSR. Valeur faible = plus rapide",
    "Upscaler for img2img": "Agrandisseur pour img2img",
    "Upscale latent space image when doing hires. fix": "Agrandir l'image de l'espace latent lors de la correction haute résolution",
    "Face restoration": "Restauration de visage",
    "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Paramètre de poids pour CodeFormer; 0 = effet maximum ; 1 = effet minimum",
    "Move face restoration model from VRAM into RAM after processing": "Déplacer le modèle de restauration de visage de la VRAM vers la RAM après traitement",
    "System": "Système",
    "VRAM usage polls per second during generation. Set to 0 to disable.": "Fréquence d'interrogation de l'utilisation de la VRAM, par seconde, pendant la génération. Mettez la valeur à 0 pour désactiver.",
    "Always print all generation info to standard output": "Toujours afficher toutes les informations de génération dans la sortie standard",
    "Add a second progress bar to the console that shows progress for an entire job.": "Ajouter une seconde barre de progression dans la console montrant l'avancement pour une tâche complète.",
    "Training": "Entrainement",
    "Unload VAE and CLIP from VRAM when training": "Décharger VAE et CLIP de la VRAM pendant l'entrainement",
    "Filename word regex": "Regex d'extraction des mots du nom de fichier",
    "Filename join string": "Chaine de caractères pour lier les noms de fichier",
    "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Nombre de répétitions pour une image unique par époque; utilisé seulement pour afficher le numéro d'époque",
    "Save an csv containing the loss to log directory every N steps, 0 to disable": "Enregistrer un csv contenant la perte dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
    "Stable Diffusion": "Stable Diffusion",
    "Checkpoints to cache in RAM": "Checkpoints à mettre en cache dans la RAM",
    "Hypernetwork strength": "Force de l'Hypernetwork",
    "Apply color correction to img2img results to match original colors.": "Appliquer une correction de couleur aux résultats img2img afin de conserver les couleurs d'origine.",
    "Save a copy of image before applying color correction to img2img results": "Enregistrer une copie de l'image avant d'appliquer la correction de couleur aux résultats img2img",
    "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Avec img2img, exécuter exactement le nombre d'étapes spécifié par la glissière (normalement moins d'étapes sont exécutées quand la réduction du bruit est plus faible).",
    "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Activer la quantisation des échantillonneurs K pour des résultats plus nets et plus propres. Cela peut modifier les valeurs aléatoires existantes. Requiert un redémarrage pour être actif.",
    "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Emphase : utilisez (texte) afin de forcer le modèle à porter plus d'attention au texte et [texte] afin qu'il y porte moins attention",
    "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Utilisez l'ancienne méthode d'emphase. Peut être utile afin de reproduire d'anciennes valeurs aléatoires.",
    "Make K-diffusion samplers produce same images in a batch as when making a single image": "Demander aux échantillonneurs K-diffusion de produire les mêmes images dans un lot que lors de la génération d'une image unique",
    "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Améliorer la cohérence en remplissant (padding) à partir de la dernière virgule dans les n jetons quand on en utilise plus de 75",
    "Filter NSFW content": "Filtrer le contenu +18 (NSFW)",
    "Stop At last layers of CLIP model": "S'arrêter aux derniers niveaux du modèle CLIP",
    "Interrogate Options": "Options d'interrogation",
    "Interrogate: keep models in VRAM": "Interroger : conserver les modèles en VRAM",
    "Interrogate: use artists from artists.csv": "Interroger : utiliser les artistes dans artists.csv",
    "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interroger : inclure la correspondance du classement des étiquettes de modèle dans les résultats (N'a pas d'effet sur les interrogateurs basés sur des descriptions).",
    "Interrogate: num_beams for BLIP": "Interroger : num_beams pour BLIP",
    "Interrogate: minimum description length (excluding artists, etc..)": "Interroger : longueur minimale de la description (excluant les artistes, etc.)",
    "Interrogate: maximum description length": "Interroger : longueur maximale de la description",
    "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP : nombre maximum de lignes dans le fichier texte (0 = pas de limite)",
    "Interrogate: deepbooru score threshold": "Interroger : seuil du score deepbooru",
    "Interrogate: deepbooru sort alphabetically": "Interroger : classement alphabétique deepbooru",
    "use spaces for tags in deepbooru": "Utiliser des espaces pour les étiquettes dans deepbooru",
    "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "échapper (\\) les crochets dans deepbooru (afin qu'ils puissent être utilisés littéralement et non pour mettre en emphase)",
    "User interface": "Interface utilisateur",
    "Show progressbar": "Afficher la barre de progression",
    "Show image creation progress every N sampling steps. Set 0 to disable.": "Afficher l'état d'avancement de la création d'image toutes les N étapes d'échantillonnage. Utiliser 0 pour désactiver.",
    "Show grid in results for web": "Afficher la grille dans les résultats web",
    "Do not show any images in results for web": "N'afficher aucune image dans les résultats web",
    "Add model hash to generation information": "Ajouter le hash du modèle dans l'information de génération",
    "Add model name to generation information": "Ajouter le nom du modèle dans l'information de génération",
    "Font for image grids that have text": "Police pour les grilles d'images contenant du texte",
    "Enable full page image viewer": "Activer l'affichage des images en plein écran",
    "Show images zoomed in by default in full page image viewer": "Afficher les images zoomées par défaut lors de l'affichage en plein écran",
    "Show generation progress in window title.": "Afficher l'avancement de la génération dans le titre de la fenêtre.",
    "Quicksettings list": "Liste de réglages rapides",
    "Localization (requires restart)": "Localisation (requiert un redémarrage)",
    "Sampler parameters": "Paramètres de l'échantillonneur",
    "Hide samplers in user interface (requires restart)": "Cacher les échantillonneurs dans l'interface utilisateur (requiert un redémarrage)",
    "eta (noise multiplier) for DDIM": "eta (multiplicateur de bruit) pour DDIM",
    "eta (noise multiplier) for ancestral samplers": "eta (multiplicateur de bruit) pour les échantillonneurs de type 'ancestral'",
    "img2img DDIM discretize": "Discrétisation DDIM pour img2img",
    "uniform": "uniforme",
    "quad": "quad",
    "sigma churn": "sigma churn",
    "sigma tmin": "sigma tmin",
    "sigma noise": "sigma noise",
    "Eta noise seed delta": "Eta noise seed delta",
    "Request browser notifications": "Demander les notifications au navigateur",
    "Download localization template": "Télécharger le modèle de localisation",
    "Reload custom script bodies (No ui updates, No restart)": "Recharger le contenu des scripts personnalisés (Pas de mise à jour de l'interface, Pas de redémarrage)",
    "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Redémarrer Gradio et rafraichir les composants (Scripts personnalisés, ui.py, js et css uniquement)",
    "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Requête (Ctrl + Entrée ou Alt + Entrée pour générer)",
    "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Requête négative (Ctrl + Entrée ou Alt + Entrée pour générer)",
    "Add a random artist to the prompt.": "Ajouter un artiste aléatoire à la requête.",
    "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Lire dans l'interface utilisateur les paramètres de génération depuis la requête, ou depuis la dernière génération si la requête est vide.",
    "Save style": "Sauvegarder le style",
    "Apply selected styles to current prompt": "Appliquer les styles sélectionnés à la requête actuelle",
    "Stop processing current image and continue processing.": "Arrêter le traitement de l'image actuelle et continuer le traitement.",
    "Stop processing images and return any results accumulated so far.": "Arrêter le traitement des images et retourner les résultats accumulés depuis le début.",
    "Style to apply; styles have components for both positive and negative prompts and apply to both": "Style à appliquer ; les styles sont composés de requêtes positives et négatives et s'appliquent aux deux",
    "Do not do anything special": "Ne rien faire de particulier",
    "Which algorithm to use to produce the image": "Quel algorithme utiliser pour produire l'image",
    "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - très créatif, peut générer des images complètement différentes en fonction du nombre d'étapes, utiliser plus de 30 à 40 étapes n'améliore pas le résultat",
    "Denoising Diffusion Implicit Models - best at inpainting": "Modèles implicites de réduction du bruit à diffusion - le meilleur pour l'inpainting",
    "Produce an image that can be tiled.": "Produit une image qui peut être bouclée (tuile).",
    "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Utilise un processus en deux étapes afin de créer partiellement une image dans une résolution plus faible, l'agrandir et améliorer ses détails sans modifier la composition",
    "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Détermine à quel point l'algorithme doit respecter le contenu de l'image. A 0 rien ne changera, à 1 l'image sera entièrement différente. Avec des valeurs inférieures à 1.0, le traitement utilisera moins d'étapes que ce que la glissière Étapes d'échantillonnage spécifie.",
    "How many batches of images to create": "Combien de lots d'images créer",
    "How many image to create in a single batch": "Combien d'images créer par lot",
    "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - spécifie à quel point l'image doit se conformer à la requête - des valeurs plus faibles produisent des résultats plus créatifs",
    "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Une valeur qui détermine la sortie du générateur de nombres aléatoires - si vous créez une image avec les mêmes paramètres et valeur aléatoire qu'une autre, le résultat sera identique",
    "Set seed to -1, which will cause a new random number to be used every time": "Passer la valeur aléatoire à -1 ; ainsi un nombre aléatoire différent sera utilisé à chaque fois",
    "Reuse seed from last generation, mostly useful if it was randomed": "Réutiliser la valeur aléatoire de la dernière génération, généralement utile uniquement si elle était randomisée",
    "Seed of a different picture to be mixed into the generation.": "Valeur aléatoire d'une image différente à mélanger dans la génération.",
    "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Force de la variation à produire. A 0 il n'y aura pas d'effet. A 1 l'image sera composée uniquement de la valeur aléatoire variable spécifiée (à l'exception des échantillonneurs `ancestral`, où vous obtiendrez simplement quelque chose).",
    "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Essayer de produire une image similaire à ce qu'elle aurait été avec la même valeur aléatoire, mais dans la résolution spécifiée",
    "Separate values for X axis using commas.": "Séparer les valeurs pour l'axe X par des virgules.",
    "Separate values for Y axis using commas.": "Séparer les valeurs pour l'axe Y par des virgules.",
    "Write image to a directory (default - log/images) and generation parameters into csv file.": "Ecrire l'image dans un dossier (par défaut - log/images) et les paramètres de génération dans un fichier csv.",
    "Open images output directory": "Ouvrir le dossier de sortie des images",
    "How much to blur the mask before processing, in pixels.": "Quantité de flou à appliquer au masque avant traitement, en pixels.",
    "What to put inside the masked area before processing it with Stable Diffusion.": "Avec quoi remplir la zone masquée avant traitement par Stable Diffusion.",
    "fill it with colors of the image": "remplir avec les couleurs de l'image",
    "keep whatever was there originally": "conserver ce qui était présent à l'origine",
    "fill it with latent space noise": "remplir avec le bruit de l'espace latent",
    "fill it with latent space zeroes": "remplir avec des zéros dans l'espace latent",
    "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Agrandir la région masquée à la résolution cible, exécuter l'inpainting, réduire à nouveau puis coller dans l'image originale",
    "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Redimensionner l'image dans la résolution cible. A moins que la hauteur et la largeur coïncident, le ratio de l'image sera incorrect.",
    "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Redimensionner l'image afin que l'entièreté de la résolution cible soit remplie par l'image. Recadrer les parties qui dépassent.",
    "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Redimensionner l'image afin que l'entièreté de l'image soit contenue dans la résolution cible. Remplir l'espace vide avec les couleurs de l'image.",
    "How many times to repeat processing an image and using it as input for the next iteration": "Combien de fois répéter le traitement d'une image et l'utiliser comme entrée pour la prochaine itération",
    "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "En mode bouclage (Loopback), à chaque tour de la boucle la force du réducteur de bruit est multipliée par cette valeur. <1 signifie réduire la variation donc votre séquence convergera vers une image fixe. >1 signifie augmenter la variation donc votre séquence deviendra de plus en plus chaotique.",
    "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Pour l'agrandissement SD, de combien les tuiles doivent se chevaucher, en pixels. Les tuiles se chevauchent de manière à ce qu'il n'y ait pas de couture visible une fois fusionnées en une image.",
    "A directory on the same machine where the server is running.": "Un dossier sur la même machine où le serveur tourne.",
    "Leave blank to save images to the default path.": "Laisser vide pour sauvegarder les images dans le chemin par défaut.",
    "Result = A * (1 - M) + B * M": "Résultat = A * (1 - M) + B * M",
    "Result = A + (B - C) * M": "Résultat = A + (B - C) * M",
    "Path to directory with input images": "Chemin vers le dossier contenant les images d'entrée",
    "Path to directory where to write outputs": "Chemin vers le dossier où écrire les sorties",
    "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utiliser les étiquettes suivantes pour définir le nom des images : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] ; laisser vide pour le nom par défaut.",
    "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Si cette option est activée, le filigrane ne sera pas ajouté aux images créées. Attention : si vous n'ajoutez pas de filigrane vous pourriez vous comporter de manière non éthique.",
    "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utiliser les étiquettes suivantes pour définir le nom des sous-dossiers pour les images et les grilles : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] ; laisser vide pour le nom par défaut.",
    "Restore low quality faces using GFPGAN neural network": "Restaurer les visages de basse qualité en utilisant le réseau neuronal GFPGAN",
    "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Cette expression régulière sera utilisée pour extraire les mots depuis le nom de fichier ; ils seront joints en utilisant l'option ci-dessous en une étiquette utilisée pour l'entrainement. Laisser vide pour conserver le texte du nom de fichier tel quel.",
    "This string will be used to join split words into a single line if the option above is enabled.": "Cette chaine de caractères sera utilisée pour joindre les mots séparés en une ligne unique si l'option ci-dessus est activée.",
    "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Liste des noms de paramètres, séparés par des virgules, pour les paramètres de la barre d'accès rapide en haut de page, plutôt que dans la page habituelle des paramètres. Voir modules/shared.py pour les noms de paramètres. Requiert un redémarrage pour s'appliquer.",
    "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Si cette valeur est différente de zéro, elle sera ajoutée à la valeur aléatoire et utilisée pour initialiser le générateur de nombres aléatoires du bruit lors de l'utilisation des échantillonneurs supportant Eta. Vous pouvez l'utiliser pour produire encore plus de variation dans les images, ou vous pouvez utiliser ceci pour faire correspondre les images avec d'autres logiciels si vous savez ce que vous faites.",
    "Enable Autocomplete": "Activer l'autocomplétion",
    "/0.0": "/0.0"
}
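Note: the two "Interpolation Method" entries above ("Somme pondérée" / "Ajouter différence") correspond to the Result formulas translated near the end of this file. As a minimal sketch of the arithmetic those labels describe, applied per tensor (this is not the webui implementation; tensor values are illustrative):

import torch

def weighted_sum(a, b, m):
    # "Weighted sum" / "Somme pondérée": Result = A * (1 - M) + B * M
    return a * (1 - m) + b * m

def add_difference(a, b, c, m):
    # "Add difference" / "Ajouter différence": Result = A + (B - C) * M
    return a + (b - c) * m

# M = 0 returns model A unchanged under weighted sum.
a, b, c = torch.ones(2), torch.zeros(2), torch.full((2,), 0.5)
print(weighted_sum(a, b, 0.0), add_difference(a, b, c, 1.0))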
@@ -7,6 +7,7 @@ import uvicorn
 from fastapi import Body, APIRouter, HTTPException
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel, Field, Json
+from typing import List
 import json
 import io
 import base64
@@ -15,12 +16,12 @@ from PIL import Image
 sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
 
 class TextToImageResponse(BaseModel):
-    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
     parameters: Json
     info: Json
 
 class ImageToImageResponse(BaseModel):
-    images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
+    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
     parameters: Json
     info: Json
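Context for the `List[str]` swap above: subscripting the builtin (`list[str]`) is only accepted at runtime on Python 3.9+ (PEP 585); on 3.7/3.8 the class body raises at import time, so `typing.List` keeps the API importable on older interpreters. A standalone illustration (version-dependent behavior; not webui code):

from typing import List
from pydantic import BaseModel, Field

class Demo(BaseModel):
    # On Python 3.8 and earlier, writing `images: list[str]` here raises
    # TypeError: 'type' object is not subscriptable at class-definition time.
    images: List[str] = Field(default=None, description="base64-encoded PNGs")

print(Demo(images=["abc"]).images)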
@@ -65,7 +66,7 @@ class Api:
             i.save(buffer, format="png")
             b64images.append(base64.b64encode(buffer.getvalue()))
 
-        return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info))
+        return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js())
@@ -111,7 +112,11 @@ class Api:
             i.save(buffer, format="png")
             b64images.append(base64.b64encode(buffer.getvalue()))
 
-        return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info))
+        if (not img2imgreq.include_init_images):
+            img2imgreq.init_images = None
+            img2imgreq.mask = None
+
+        return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js())
 
     def extrasapi(self):
         raise NotImplementedError
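The new `include_init_images` handling keeps the potentially huge base64 init images and mask out of the `parameters` blob echoed back to the client unless explicitly requested. A hypothetical client call (the /sdapi/v1/img2img route and a local webui started with --api are assumptions here, not shown in this diff):

import base64, requests

with open("input.png", "rb") as f:
    init_image = base64.b64encode(f.read()).decode()

payload = {
    "init_images": [init_image],
    "prompt": "a watercolor landscape",
    "denoising_strength": 0.75,
    "include_init_images": False,  # default: echoed parameters omit init_images/mask
}
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/img2img", json=payload).json()
print(sorted(resp.keys()))  # ['images', 'info', 'parameters']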
@@ -31,6 +31,7 @@ class ModelDef(BaseModel):
     field_alias: str
     field_type: Any
     field_value: Any
+    field_exclude: bool = False
 
 
 class PydanticModelGenerator:
@@ -78,7 +79,8 @@ class PydanticModelGenerator:
                 field=underscore(fields["key"]),
                 field_alias=fields["key"],
                 field_type=fields["type"],
-                field_value=fields["default"]))
+                field_value=fields["default"],
+                field_exclude=fields["exclude"] if "exclude" in fields else False))
 
     def generate_model(self):
         """
@@ -86,7 +88,7 @@ class PydanticModelGenerator:
         from the json and overrides provided at initialization
         """
         fields = {
-            d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def
+            d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
         }
         DynamicModel = create_model(self._model_name, **fields)
         DynamicModel.__config__.allow_population_by_field_name = True
@@ -102,5 +104,5 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
 StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
     "StableDiffusionProcessingImg2Img",
     StableDiffusionProcessingImg2Img,
-    [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}]
+    [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
 ).generate_model()
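For reference, `exclude` on a Pydantic v1 Field means the field is accepted on input but omitted when the model is serialized; that is the mechanism the new `field_exclude` plumbing exposes. A minimal sketch independent of the generator (assumes pydantic 1.8+, where Field-level exclude was introduced):

from pydantic import BaseModel, Field

class Req(BaseModel):
    prompt: str = ""
    include_init_images: bool = Field(default=False, exclude=True)

r = Req(prompt="hello", include_init_images=True)
print(r.include_init_images)  # True - still usable in code
print(r.dict())               # {'prompt': 'hello'} - excluded from serialization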
@@ -5,6 +5,7 @@ import html
 import os
 import sys
 import traceback
+import inspect
 
 import modules.textual_inversion.dataset
 import torch
@@ -15,10 +16,12 @@ from modules import devices, processing, sd_models, shared
 from modules.textual_inversion import textual_inversion
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
 from torch import einsum
+from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
 
 from collections import defaultdict, deque
 from statistics import stdev, mean
 
 
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
     activation_dict = {
@@ -26,9 +29,12 @@ class HypernetworkModule(torch.nn.Module):
         "leakyrelu": torch.nn.LeakyReLU,
         "elu": torch.nn.ELU,
         "swish": torch.nn.Hardswish,
+        "tanh": torch.nn.Tanh,
+        "sigmoid": torch.nn.Sigmoid,
     }
+    activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
 
-    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
+    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False, activate_output=False):
         super().__init__()
 
         assert layer_structure is not None, "layer_structure must not be None"
@@ -65,9 +71,24 @@ class HypernetworkModule(torch.nn.Module):
         else:
             for layer in self.linear:
                 if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
-                    layer.weight.data.normal_(mean=0.0, std=0.01)
-                    layer.bias.data.zero_()
+                    w, b = layer.weight.data, layer.bias.data
+                    if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
+                        normal_(w, mean=0.0, std=0.01)
+                        normal_(b, mean=0.0, std=0.005)
+                    elif weight_init == 'XavierUniform':
+                        xavier_uniform_(w)
+                        zeros_(b)
+                    elif weight_init == 'XavierNormal':
+                        xavier_normal_(w)
+                        zeros_(b)
+                    elif weight_init == 'KaimingUniform':
+                        kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
+                        zeros_(b)
+                    elif weight_init == 'KaimingNormal':
+                        kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
+                        zeros_(b)
+                    else:
+                        raise KeyError(f"Key {weight_init} is not defined as initialization!")
         self.to(devices.device)
 
     def fix_old_state_dict(self, state_dict):
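A standalone sketch of the initializers wired in above (arbitrary layer size; torch only). Kaiming variants scale by fan-in for ReLU-family activations, Xavier variants suit symmetric activations, and per the code LayerNorm layers always take the small normal init regardless of the selected mode:

import torch
from torch.nn.init import normal_, xavier_normal_, kaiming_normal_, zeros_

layer = torch.nn.Linear(320, 640)
kaiming_normal_(layer.weight.data, nonlinearity='relu')  # fan-in scaled, as in the 'KaimingNormal' branch
zeros_(layer.bias.data)
xavier_normal_(layer.weight.data)                        # as in the 'XavierNormal' branch
normal_(layer.weight.data, mean=0.0, std=0.01)           # the 'Normal' / LayerNorm path
print(layer.weight.std().item())                         # ~0.01 after the last init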
@@ -105,7 +126,7 @@ class Hypernetwork:
     filename = None
     name = None
 
-    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False, activate_output=False):
+    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False):
         self.filename = None
         self.name = name
         self.layers = {}
@@ -114,14 +135,15 @@ class Hypernetwork:
         self.sd_checkpoint_name = None
         self.layer_structure = layer_structure
         self.activation_func = activation_func
+        self.weight_init = weight_init
         self.add_layer_norm = add_layer_norm
         self.use_dropout = use_dropout
         self.activate_output = activate_output
 
         for size in enable_sizes or []:
             self.layers[size] = (
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
             )
 
     def weights(self):
@@ -145,6 +167,7 @@ class Hypernetwork:
         state_dict['layer_structure'] = self.layer_structure
         state_dict['activation_func'] = self.activation_func
         state_dict['is_layer_norm'] = self.add_layer_norm
+        state_dict['weight_initialization'] = self.weight_init
         state_dict['use_dropout'] = self.use_dropout
         state_dict['sd_checkpoint'] = self.sd_checkpoint
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
@@ -160,16 +183,22 @@ class Hypernetwork:
         state_dict = torch.load(filename, map_location='cpu')
 
         self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
+        print(self.layer_structure)
         self.activation_func = state_dict.get('activation_func', None)
+        print(f"Activation function is {self.activation_func}")
+        self.weight_init = state_dict.get('weight_initialization', 'Normal')
+        print(f"Weight initialization is {self.weight_init}")
         self.add_layer_norm = state_dict.get('is_layer_norm', False)
-        self.use_dropout = state_dict.get('use_dropout', False)
+        print(f"Layer norm is set to {self.add_layer_norm}")
+        self.use_dropout = state_dict.get('use_dropout', False)
+        print(f"Dropout usage is set to {self.use_dropout}")
         self.activate_output = state_dict.get('activate_output', True)
 
         for size, sd in state_dict.items():
             if type(size) == int:
                 self.layers[size] = (
-                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
-                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout, self.activate_output),
+                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
+                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout, self.activate_output),
                 )
 
         self.name = state_dict.get('name', self.name)
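Note the defaults on the .get() calls above: hypernetwork checkpoints written before this change carry no 'weight_initialization' key, so older files load cleanly as 'Normal' instead of failing. The fallback in isolation:

state_dict = {"layer_structure": [1, 2, 1]}  # stand-in for an old-format checkpoint
print(state_dict.get("weight_initialization", "Normal"))  # 'Normal'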
@@ -8,8 +8,9 @@ import modules.textual_inversion.textual_inversion
 from modules import devices, sd_hijack, shared
 from modules.hypernetworks import hypernetwork
 
+keys = list(hypernetwork.HypernetworkModule.activation_dict.keys())
 
-def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False):
+def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
     # Remove illegal characters from name.
     name = "".join( x for x in name if (x.isalnum() or x in "._- "))
 
@@ -25,6 +26,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
         enable_sizes=[int(x) for x in enable_sizes],
         layer_structure=layer_structure,
         activation_func=activation_func,
+        weight_init=weight_init,
         add_layer_norm=add_layer_norm,
         use_dropout=use_dropout,
     )
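The module-level `keys` list added above feeds the activation-function choices exposed to the UI; thanks to the inspect sweep in the hypernetwork module, it covers every class defined in torch.nn.modules.activation, not just the hand-picked dict entries. A quick look at what that sweep collects (names vary with the installed torch version):

import inspect
import torch.nn.modules.activation as act

names = sorted(n.lower() for n, c in inspect.getmembers(act)
               if inspect.isclass(c) and c.__module__ == 'torch.nn.modules.activation')
print(names[:6])  # e.g. ['celu', 'elu', 'gelu', 'glu', 'hardshrink', 'hardsigmoid']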
@@ -277,7 +277,7 @@ invalid_filename_chars = '<>:"/\\|?*\n'
 invalid_filename_prefix = ' '
 invalid_filename_postfix = ' .'
 re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
-re_pattern = re.compile(r"([^\[\]]+|\[([^]]+)]|[\[\]]*)")
+re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
 re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
 max_filename_part_length = 128
 
@@ -343,7 +343,7 @@ class FilenameGenerator:
     def datetime(self, *args):
         time_datetime = datetime.datetime.now()
 
-        time_format = args[0] if len(args) > 0 else self.default_time_format
+        time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
        try:
             time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
         except pytz.exceptions.UnknownTimeZoneError as _:
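The extra `args[0] != ""` guard matters because a filename pattern such as [datetime<,Asia/Tokyo>] hands the formatter an empty format string; it now falls back to the default instead of producing an empty filename part. The guard in isolation:

args = [""]                            # what an empty-format [datetime<...>] tag passes
default_time_format = "%Y%m%d%H%M%S"   # stand-in for the class attribute
time_format = args[0] if len(args) > 0 and args[0] != "" else default_time_format
print(time_format)  # falls back to the default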
@@ -362,9 +362,9 @@ class FilenameGenerator:
 
         for m in re_pattern.finditer(x):
             text, pattern = m.groups()
+            res += text
 
             if pattern is None:
-                res += text
                 continue
 
             pattern_args = []
@@ -385,12 +385,9 @@ class FilenameGenerator:
                 print(f"Error adding [{pattern}] to filename", file=sys.stderr)
                 print(traceback.format_exc(), file=sys.stderr)
 
-            if replacement is None:
-                res += f'[{pattern}]'
-            else:
-                res += str(replacement)
-
-            continue
+            if replacement is not None:
+                res += str(replacement)
+                continue
 
+            res += f'[{pattern}]'
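The rewritten re_pattern consumes the template as alternating (literal text, bracketed token) pairs in a single pass, which is what lets the loop above emit `res += text` unconditionally. A standalone check of the tokenization (the template string is just an example):

import re

re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
for text, pattern in re_pattern.findall("img-[seed]-[prompt_words]"):
    print(repr(text), repr(pattern))
# 'img-' 'seed'
# '-' 'prompt_words'
# '' ''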
341 modules/textual_inversion/autocrop.py Normal file
@@ -0,0 +1,341 @@
import cv2
import requests
import os
from collections import defaultdict
from math import log, sqrt
import numpy as np
from PIL import Image, ImageDraw

GREEN = "#0F0"
BLUE = "#00F"
RED = "#F00"


def crop_image(im, settings):
    """ Intelligently crop an image to the subject matter """

    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    if settings.annotate_image:
        d = ImageDraw.Draw(im_debug)
        rect = list(crop)
        rect[2] -= 1
        rect[3] -= 1
        d.rectangle(rect, outline=GREEN)
        results.append(im_debug)
        if settings.destop_view_image:
            im_debug.show()

    return results

def focal_point(im, settings):
    corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
    entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
    face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

    pois = []

    weight_pref_total = 0
    if len(corner_points) > 0:
        weight_pref_total += settings.corner_points_weight
    if len(entropy_points) > 0:
        weight_pref_total += settings.entropy_points_weight
    if len(face_points) > 0:
        weight_pref_total += settings.face_points_weight

    corner_centroid = None
    if len(corner_points) > 0:
        corner_centroid = centroid(corner_points)
        corner_centroid.weight = settings.corner_points_weight / weight_pref_total
        pois.append(corner_centroid)

    entropy_centroid = None
    if len(entropy_points) > 0:
        entropy_centroid = centroid(entropy_points)
        entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
        pois.append(entropy_centroid)

    face_centroid = None
    if len(face_points) > 0:
        face_centroid = centroid(face_points)
        face_centroid.weight = settings.face_points_weight / weight_pref_total
        pois.append(face_centroid)

    average_point = poi_average(pois, settings)

    if settings.annotate_image:
        d = ImageDraw.Draw(im)
        max_size = min(im.width, im.height) * 0.07
        if corner_centroid is not None:
            color = BLUE
            box = corner_centroid.bounding(max_size * corner_centroid.weight)
            d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(corner_points) > 1:
                for f in corner_points:
                    d.rectangle(f.bounding(4), outline=color)
        if entropy_centroid is not None:
            color = "#ff0"
            box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
            d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(entropy_points) > 1:
                for f in entropy_points:
                    d.rectangle(f.bounding(4), outline=color)
        if face_centroid is not None:
            color = RED
            box = face_centroid.bounding(max_size * face_centroid.weight)
            d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(face_points) > 1:
                for f in face_points:
                    d.rectangle(f.bounding(4), outline=color)

        d.ellipse(average_point.bounding(max_size), outline=GREEN)

    return average_point
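Aside on the weighting above: each detector contributes its centroid with weight equal to its configured preference divided by the total preference of the detectors that actually returned points, so the contributing weights always sum to 1 before poi_average combines them. A toy check (preference values are illustrative):

corner_w, entropy_w, face_w = 0.5, 0.3, 0.9
fired = {"corner": corner_w, "face": face_w}  # suppose entropy found nothing
total = sum(fired.values())
print({k: round(w / total, 3) for k, w in fired.items()})  # {'corner': 0.357, 'face': 0.643}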
|
||||
|
||||
|
||||
def image_face_points(im, settings):
    if settings.dnn_model_path is not None:
        detector = cv2.FaceDetectorYN.create(
            settings.dnn_model_path,
            "",
            (im.width, im.height),
            0.9,  # score threshold
            0.3,  # nms threshold
            5000  # keep top k before nms
        )
        faces = detector.detect(np.array(im))

        results = []
        if faces[1] is not None:
            for face in faces[1]:
                x = face[0]
                y = face[1]
                w = face[2]
                h = face[3]
                results.append(
                    PointOfInterest(
                        int(x + (w * 0.5)),   # face focus left/right is the center of the box
                        int(y + (h * 0.33)),  # face focus up/down is close to the top of the head
                        size=w,
                        weight=1 / len(faces[1])
                    )
                )
        return results
    else:
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least this fraction of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                                                    minNeighbors=7, minSize=(minsize, minsize),
                                                    flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                # cascade failed to load or detection errored; try the next cascade
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]), weight=1 / len(rects)) for r in rects]
        return []

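# Note on the DNN branch: cv2.FaceDetectorYN.detect returns a (retval, faces) pair,
# hence the faces[1] indexing above; each detection row starts with the bounding box
# as [x, y, w, h], followed by landmark coordinates and a confidence score.
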
def image_corner_points(im, settings):
    grayscale = im.convert("L")

    # naive attempt at preventing focal points from collecting at watermarks near the
    # bottom: paint the lowest 10% of the image flat gray so it yields no corners
    gd = ImageDraw.Draw(grayscale)
    gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")

    np_im = np.array(grayscale)

    points = cv2.goodFeaturesToTrack(
        np_im,
        maxCorners=100,
        qualityLevel=0.04,
        minDistance=min(grayscale.width, grayscale.height) * 0.06,
        useHarrisDetector=False,
    )

    if points is None:
        return []

    focal_points = []
    for point in points:
        x, y = point.ravel()
        focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))

    return focal_points

def image_entropy_points(im, settings):
    landscape = im.height < im.width
    portrait = im.height > im.width
    if landscape:
        move_idx = [0, 2]
        move_max = im.size[0]
    elif portrait:
        move_idx = [1, 3]
        move_max = im.size[1]
    else:
        # square images leave no room to slide the window
        return []

    e_max = 0
    crop_current = [0, 0, settings.crop_width, settings.crop_height]
    crop_best = crop_current
    # slide a crop-sized window along the long axis in 4px steps, keeping the
    # position with the highest entropy score
    while crop_current[move_idx[1]] < move_max:
        crop = im.crop(tuple(crop_current))
        e = image_entropy(crop)

        if e > e_max:
            e_max = e
            crop_best = list(crop_current)

        crop_current[move_idx[0]] += 4
        crop_current[move_idx[1]] += 4

    x_mid = int(crop_best[0] + settings.crop_width / 2)
    y_mid = int(crop_best[1] + settings.crop_height / 2)

    return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

def image_entropy(im):
    # entropy of the image after conversion to 1-bit black and white
    # (the commented line would use the 8-bit greyscale band instead)
    # band = np.asarray(im.convert("L"))
    band = np.asarray(im.convert("1"), dtype=np.uint8)
    hist, _ = np.histogram(band, bins=range(0, 256))
    hist = hist[hist > 0]
    # note: this sums -log2(p) over the occupied bins rather than -p*log2(p),
    # so it is a surprisal-style score, not Shannon entropy proper
    return -np.log2(hist / hist.sum()).sum()

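# Quick sanity check (hypothetical, relies only on the function above): after the
# 1-bit conversion a uniform white image occupies a single histogram bin and scores
# 0.0, while dithered noise splits roughly evenly across two bins and scores about
# -log2(0.5) - log2(0.5) = 2.0 under this formula:
#
#   from PIL import Image
#   import numpy as np
#   image_entropy(Image.new("L", (64, 64), 255))  # 0.0
#   image_entropy(Image.fromarray(np.random.randint(0, 256, (64, 64), dtype=np.uint8)))  # ~2.0
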
def centroid(pois):
    x = [poi.x for poi in pois]
    y = [poi.y for poi in pois]
    return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois))


def poi_average(pois, settings):
    weight = 0.0
    x = 0.0
    y = 0.0
    for poi in pois:
        weight += poi.weight
        x += poi.x * poi.weight
        y += poi.y * poi.weight
    avg_x = round(x / weight)
    avg_y = round(y / weight)

    return PointOfInterest(avg_x, avg_y)

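# e.g. poi_average([PointOfInterest(10, 10, weight=0.75), PointOfInterest(50, 50, weight=0.25)], None)
# yields PointOfInterest(20, 20); note that the settings argument is accepted but unused.
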
def is_landscape(w, h):
    return w > h


def is_portrait(w, h):
    return h > w


def is_square(w, h):
    return w == h


def download_and_cache_models(dirname):
    download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
    model_file_name = 'face_detection_yunet.onnx'

    if not os.path.exists(dirname):
        os.makedirs(dirname)

    cache_file = os.path.join(dirname, model_file_name)
    if not os.path.exists(cache_file):
        print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
        response = requests.get(download_url)
        with open(cache_file, "wb") as f:
            f.write(response.content)

    if os.path.exists(cache_file):
        return cache_file
    return None


class PointOfInterest:
    def __init__(self, x, y, weight=1.0, size=10):
        self.x = x
        self.y = y
        self.weight = weight
        self.size = size

    def bounding(self, size):
        # axis-aligned box of the given size centered on the point
        return [
            self.x - size // 2,
            self.y - size // 2,
            self.x + size // 2,
            self.y + size // 2
        ]


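# e.g. (hypothetical values) PointOfInterest(100, 50).bounding(10) -> [95, 45, 105, 55]
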
class Settings:
    def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
        self.crop_width = crop_width
        self.crop_height = crop_height
        self.corner_points_weight = corner_points_weight
        self.entropy_points_weight = entropy_points_weight
        self.face_points_weight = face_points_weight
        self.annotate_image = annotate_image
        # not a constructor argument; set this on the instance to pop up the debug image
        self.desktop_view_image = False
        self.dnn_model_path = dnn_model_path
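# A minimal end-to-end sketch of this module (hypothetical paths, assuming only the
# names defined above; download_and_cache_models may raise or return None, which the
# preprocess.py diff below guards with try/except before falling back to Haar):
#
#   from PIL import Image
#   im = Image.open("input.png")
#   model = download_and_cache_models("models/opencv")
#   s = Settings(crop_width=512, crop_height=512, face_points_weight=0.9, dnn_model_path=model)
#   crop_image(im, s)[0].save("input.cropped.png")
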
@@ -7,12 +7,14 @@ import tqdm
 import time
 
 from modules import shared, images
+from modules.paths import models_path
 from modules.shared import opts, cmd_opts
+from modules.textual_inversion import autocrop
 if cmd_opts.deepdanbooru:
     import modules.deepbooru as deepbooru
 
 
-def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2):
+def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
     try:
         if process_caption:
             shared.interrogator.load()
@@ -22,7 +24,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
             db_opts[deepbooru.OPT_INCLUDE_RANKS] = False
             deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts)
 
-        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio)
+        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)
 
     finally:
 
@@ -34,7 +36,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
 
 
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2):
+def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
     width = process_width
     height = process_height
     src = os.path.abspath(process_src)
@@ -113,6 +115,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
             splitted = image.crop((0, y, to_w, y + to_h))
             yield splitted
 
+
     for index, imagefile in enumerate(tqdm.tqdm(files)):
         subindex = [0]
         filename = os.path.join(src, imagefile)
@@ -137,11 +140,36 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
         ratio = (img.height * width) / (img.width * height)
         inverse_xy = True
 
+        process_default_resize = True
+
         if process_split and ratio < 1.0 and ratio <= split_threshold:
             for splitted in split_pic(img, inverse_xy):
                 save_pic(splitted, index, existing_caption=existing_caption)
-        else:
+            process_default_resize = False
+
+        if process_focal_crop and img.height != img.width:
+
+            dnn_model_path = None
+            try:
+                dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv"))
+            except Exception as e:
+                print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
+
+            autocrop_settings = autocrop.Settings(
+                crop_width = width,
+                crop_height = height,
+                face_points_weight = process_focal_crop_face_weight,
+                entropy_points_weight = process_focal_crop_entropy_weight,
+                corner_points_weight = process_focal_crop_edges_weight,
+                annotate_image = process_focal_crop_debug,
+                dnn_model_path = dnn_model_path,
+            )
+            for focal in autocrop.crop_image(img, autocrop_settings):
+                save_pic(focal, index, existing_caption=existing_caption)
+            process_default_resize = False
+
+        if process_default_resize:
             img = images.resize_image(1, img, width, height)
             save_pic(img, index, existing_caption=existing_caption)
 
         shared.state.nextjob()
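# A hypothetical call exercising the new parameters added above (all values are
# illustrative, not taken from the commit):
#
#   preprocess("raw/", "processed/", 512, 512, "ignore",
#              process_flip=False, process_split=False, process_caption=False,
#              process_focal_crop=True, process_focal_crop_face_weight=0.9,
#              process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5)
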
@@ -157,6 +157,9 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
     cond_model = shared.sd_model.cond_stage_model
     embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
 
+    with devices.autocast():
+        cond_model([""])  # will send cond model to GPU if lowvram/medvram is active
+
     ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
     embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
     vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
@@ -1238,7 +1238,8 @@ def create_ui(wrap_gradio_gpu_call):
         new_hypernetwork_name = gr.Textbox(label="Name")
         new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
         new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
-        new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=["linear", "relu", "leakyrelu", "elu", "swish"])
+        new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
+        new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
         new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
         new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
         overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
@@ -1260,6 +1261,7 @@ def create_ui(wrap_gradio_gpu_call):
             with gr.Row():
                 process_flip = gr.Checkbox(label='Create flipped copies')
                 process_split = gr.Checkbox(label='Split oversized images')
+                process_focal_crop = gr.Checkbox(label='Auto focal point crop')
                 process_caption = gr.Checkbox(label='Use BLIP for caption')
                 process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True if cmd_opts.deepdanbooru else False)
 
@@ -1267,6 +1269,12 @@ def create_ui(wrap_gradio_gpu_call):
                 process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
                 process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05)
 
+            with gr.Row(visible=False) as process_focal_crop_row:
+                process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
+                process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05)
+                process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
+                process_focal_crop_debug = gr.Checkbox(label='Create debug image')
+
             with gr.Row():
                 with gr.Column(scale=3):
                     gr.HTML(value="")
@@ -1280,6 +1288,12 @@ def create_ui(wrap_gradio_gpu_call):
                 outputs=[process_split_extra_row],
             )
 
+            process_focal_crop.change(
+                fn=lambda show: gr_show(show),
+                inputs=[process_focal_crop],
+                outputs=[process_focal_crop_row],
+            )
+
         with gr.Tab(label="Train"):
             gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
             with gr.Row():
@@ -1342,6 +1356,7 @@ def create_ui(wrap_gradio_gpu_call):
                 overwrite_old_hypernetwork,
                 new_hypernetwork_layer_structure,
                 new_hypernetwork_activation_func,
+                new_hypernetwork_initialization_option,
                 new_hypernetwork_add_layer_norm,
                 new_hypernetwork_use_dropout
             ],
@@ -1367,6 +1382,11 @@ def create_ui(wrap_gradio_gpu_call):
                 process_caption_deepbooru,
                 process_split_threshold,
                 process_overlap_ratio,
+                process_focal_crop,
+                process_focal_crop_face_weight,
+                process_focal_crop_entropy_weight,
+                process_focal_crop_edges_weight,
+                process_focal_crop_debug,
             ],
             outputs=[
                 ti_output,
@@ -8,6 +8,8 @@ gradio==3.5
 invisible-watermark
 numpy
 omegaconf
+opencv-python
+requests
 piexif
 Pillow
 pytorch_lightning
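# Note: the two new requirement entries back the focal-point cropper above:
# opencv-python provides cv2 (FaceDetectorYN, the Haar cascades, goodFeaturesToTrack)
# and requests is used by autocrop.download_and_cache_models to fetch the YuNet model.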