<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="description" content="Annotated PyTorch implementation/tutorial of stable diffusion."/>

<meta name="twitter:card" content="summary"/>
<meta name="twitter:image:src" content="https://avatars1.githubusercontent.com/u/64068543?s=400&v=4"/>
<meta name="twitter:title" content="Stable Diffusion"/>
<meta name="twitter:description" content="Annotated PyTorch implementation/tutorial of stable diffusion."/>
<meta name="twitter:site" content="@labmlai"/>
<meta name="twitter:creator" content="@labmlai"/>

<meta property="og:url" content="https://nn.labml.ai/diffusion/stable_diffusion/index.html"/>
<meta property="og:title" content="Stable Diffusion"/>
<meta property="og:image" content="https://avatars1.githubusercontent.com/u/64068543?s=400&v=4"/>
<meta property="og:site_name" content="Stable Diffusion"/>
<meta property="og:type" content="object"/>
<meta property="og:title" content="Stable Diffusion"/>
<meta property="og:description" content="Annotated PyTorch implementation/tutorial of stable diffusion."/>

<title>Stable Diffusion</title>
<link rel="shortcut icon" href="/icon.png"/>
<link rel="stylesheet" href="../../pylit.css?v=1">
<link rel="canonical" href="https://nn.labml.ai/diffusion/stable_diffusion/index.html"/>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.13.18/dist/katex.min.css" integrity="sha384-zTROYFVGOfTw7JV7KUu8udsvW2fx4lWOsCEDqhBreBwlHI4ioVRtmIvEThzJHGET" crossorigin="anonymous">

<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-4V3HC8HBLH"></script>
<script>
window.dataLayer = window.dataLayer || [];

function gtag() {
dataLayer.push(arguments);
}

gtag('js', new Date());

gtag('config', 'G-4V3HC8HBLH');
</script>
</head>
<body>
<div id='container'>
<div id="background"></div>
<div class='section'>
<div class='docs'>
<p>
<a class="parent" href="/">home</a>
<a class="parent" href="../index.html">diffusion</a>
<a class="parent" href="index.html">stable_diffusion</a>
</p>
<p>
<a href="https://github.com/sponsors/labmlai" target="_blank">
<img alt="Sponsor"
src="https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86"
style="max-width:100%;"/></a>
<a href="https://github.com/labmlai/annotated_deep_learning_paper_implementations" target="_blank">
<img alt="Github"
src="https://img.shields.io/github/stars/labmlai/annotated_deep_learning_paper_implementations?style=social"
style="max-width:100%;"/></a>
<a href="https://twitter.com/labmlai" rel="nofollow" target="_blank">
<img alt="Twitter"
src="https://img.shields.io/twitter/follow/labmlai?style=social"
style="max-width:100%;"/></a>
</p>
<p>
<a href="https://github.com/labmlai/annotated_deep_learning_paper_implementations/tree/master/labml_nn/diffusion/stable_diffusion/__init__.py" target="_blank">
View code on GitHub</a>
</p>
</div>
</div>
<div class='section' id='section-0'>
<div class='docs doc-strings'>
<div class='section-link'>
<a href='#section-0'>#</a>
</div>
<h1>Stable Diffusion</h1>
<p>This is based on the official Stable Diffusion repository <a href="https://github.com/CompVis/stable-diffusion">CompVis/stable-diffusion</a>. We have kept the model structure the same, so the open-sourced weights can be loaded directly. Our implementation does not contain training code.</p>
<h3><a href="https://promptart.labml.ai">PromptArt</a></h3>
<p><img alt="PromptArt" src="https://labml.ai/images/promptart-feed.webp"></p>
<p>We have deployed a Stable Diffusion-based image generation service at <a href="https://promptart.labml.ai">promptart.labml.ai</a>.</p>
<h3><a href="latent_diffusion.html">Latent Diffusion Model</a></h3>
<p>The core is the <a href="latent_diffusion.html">Latent Diffusion Model</a>. It consists of:</p>
<ul><li><a href="model/autoencoder.html">AutoEncoder</a> </li>
<li><a href="model/unet.html">U-Net</a> with <a href="model/unet_attention.html">attention</a></li></ul>
<p>The denoising process is conditioned on <a href="model/clip_embedder.html">CLIP embeddings</a> of the text prompt.</p>
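<p>To make the data flow concrete, here is a rough PyTorch-style sketch of a single noising and denoising step in latent space. The stand-in functions, tensor shapes and schedule values below are illustrative assumptions, not the API of this implementation; see the linked pages above for the real modules.</p>
<div class="highlight"><pre>import torch

# Illustrative stand-ins for the linked AutoEncoder, U-Net and CLIP embedder.
def encode(images):
    # AutoEncoder encoder: maps 3x512x512 pixels to 4x64x64 latents (assumed shapes).
    return torch.randn(images.shape[0], 4, 64, 64)

def unet(z, t, cond):
    # U-Net with attention: predicts the noise in z, conditioned on t and the prompt.
    return torch.randn_like(z)

images = torch.randn(1, 3, 512, 512)   # a batch with one RGB image
text_cond = torch.randn(1, 77, 768)    # CLIP embeddings of the prompt (placeholder values)

# Forward diffusion: noise the latents at a random timestep t.
z0 = encode(images)
t = torch.randint(0, 1000, (1,))
alpha_bar = torch.rand(1)              # stand-in for the cumulative noise schedule at t
noise = torch.randn_like(z0)
zt = alpha_bar.sqrt() * z0 + (1 - alpha_bar).sqrt() * noise

# The U-Net is trained to recover the noise from the noisy latents and the conditioning.
eps_pred = unet(zt, t, text_cond)
loss = torch.nn.functional.mse_loss(eps_pred, noise)</pre></div>
<p>Running the diffusion in the autoencoder's latent space (for example 4x64x64 latents instead of 3x512x512 pixels) is what makes training and sampling computationally practical.</p>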
<h3><a href="sampler/index.html">Sampling Algorithms</a></h3>
<p>We have implemented the following <a href="sampler/index.html">sampling algorithms</a>; a sketch of a single DDIM update step follows the list:</p>
<ul><li><a href="sampler/ddpm.html">Denoising Diffusion Probabilistic Models (DDPM) Sampling</a> </li>
<li><a href="sampler/ddim.html">Denoising Diffusion Implicit Models (DDIM) Sampling</a></li></ul>
<h3><a href="scripts/index.html">Example Scripts</a></h3>
<p>Here are the image generation scripts; the idea behind the image-to-image script is sketched after the list:</p>
<ul><li><a href="scripts/text_to_image.html">Generate images from text prompts</a> </li>
<li><a href="scripts/image_to_image.html">Generate images based on a given image, guided by a prompt</a> </li>
<li><a href="scripts/in_paint.html">Modify parts of a given image based on a text prompt</a></li></ul>
<h4><a href="util.html">Utilities</a></h4>
<p><a href="util.html"><code class="highlight"><span></span><span class="n">util</span><span class="o">.</span><span class="n">py</span></code>
</a> defines the utility functions.</p>

</div>
<div class='code'>
<div class="highlight"><pre></pre></div>
</div>
</div>
<div class='footer'>
<a href="https://papers.labml.ai">Trending Research Papers</a>
<a href="https://labml.ai">labml.ai</a>
</div>
</div>
<script src="../../interactive.js?v=1"></script>
<script>
// Open inline images in a full-size modal overlay when clicked.
function handleImages() {
var images = document.querySelectorAll('p>img')

for (var i = 0; i < images.length; ++i) {
handleImage(images[i])
}
}

function handleImage(img) {
img.parentElement.style.textAlign = 'center'

// Build the modal overlay: an image plus a close button.
var modal = document.createElement('div')
modal.id = 'modal'

var modalContent = document.createElement('div')
modal.appendChild(modalContent)

var modalImage = document.createElement('img')
modalContent.appendChild(modalImage)

var span = document.createElement('span')
span.classList.add('close')
span.textContent = 'x'
modal.appendChild(span)

// Clicking the image attaches the modal and shows the full-size copy.
img.onclick = function () {
document.body.appendChild(modal)
modalImage.src = img.src
}

// Clicking the close button removes the modal again.
span.onclick = function () {
document.body.removeChild(modal)
}
}

handleImages()
</script>
</body>
</html> |