Compare commits

...

6 Commits

Author SHA1 Message Date
Logan Cusano
e874e3c785 Initial implementation of Stable Diffusion
- Needs to add attachment to discord
- Needs to delete image afterward
- Needs to calculate token usage
2023-03-13 00:03:59 -04:00
Logan Cusano
53256cdc42 Ignore generated images directory 2023-03-12 23:53:23 -04:00
Logan Cusano
3c05395ddf Merge branch 'main' into feature/#6-add-stable-diffusion 2023-03-12 15:59:42 -04:00
Logan Cusano
f840d754ac Merge branch 'main' of git.vpn.cusano.net:logan/Emmelia-Link-Flayer-Rewrite 2023-03-12 15:54:52 -04:00
Logan Cusano
cb8dfca8dd Return the source title from record promises 2023-03-12 15:54:22 -04:00
d372bcd8af Merge pull request 'feature/merge-multiple-images' (#7) from feature/merge-multiple-images into main
Images will now have their own embeds
2023-03-12 04:39:35 -04:00
11 changed files with 13654 additions and 9 deletions

3
.gitignore vendored
View File

@@ -3,4 +3,5 @@ node_modules/
package-lock.json
*.bak
*.log
*._.*
*._.*
.generations/

View File

@@ -1,6 +1,6 @@
const { submitImagePromptTransaction, DALLE_COLOR } = require("../controllers/openAiController");
const { SlashCommandBuilder } = require('discord.js');
const { DebugBuilder } = require("../utilities/debugBuilder");
const { submitImagePromptTransaction, DALLE_COLOR } = require("../controllers/openAiController");
const log = new DebugBuilder("server", "imagine");
const { EmmeliaEmbedBuilder } = require('../libUtils');

33
commands/stability.js Normal file
View File

@@ -0,0 +1,33 @@
const { SlashCommandBuilder } = require('discord.js');
const { DebugBuilder } = require("../utilities/debugBuilder");
const log = new DebugBuilder("server", "stability");
const { submitImageGenerationTransaction, STABILITY_COLOR } = require("../controllers/stabilityController");
const { EmmeliaEmbedBuilder } = require('../libUtils');
module.exports = {
data: new SlashCommandBuilder()
.setName('stability')
.setDescription('Replies with your input!'),
/*
.addStringOption(option =>
option.setName('input')
.setDescription('The input to echo back')
.setRequired(false)
.addChoices()),
*/
example: "stability",
isPrivileged: true,
requiresTokens: false,
defaultTokenUsage: 0,
deferInitialReply: true,
async execute(interaction) {
try{
await submitImageGenerationTransaction();
await interaction.editReply('**Pong.**'); // TODO - Add insults as the response to this command
}catch(err){
log.ERROR(err)
//await interaction.reply(err.toString());
}
}
};
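
The command above is still a placeholder: it fires submitImageGenerationTransaction() with no arguments and replies with the ping text, and the commit message lists the remaining work (attach the image to Discord, delete it afterward, count token usage). A minimal sketch of how the execute handler could cover the first two items, assuming discord.js v14's AttachmentBuilder, a hypothetical 'prompt' string option on the command, and a hypothetical change that makes submitImageGenerationTransaction resolve with the path of the written image:

// Hypothetical follow-up to the placeholder handler above; not part of this diff.
const { AttachmentBuilder } = require('discord.js');
const fsp = require('node:fs/promises');
const { submitImageGenerationTransaction } = require("../controllers/stabilityController");

async function executeWithAttachment(interaction) {
    // Assumes a 'prompt' string option is added to the SlashCommandBuilder above.
    const prompt = interaction.options.getString('prompt');
    // Assumes the controller is reworked to resolve with the generated file's path.
    const imagePath = await submitImageGenerationTransaction(prompt);
    try {
        const attachment = new AttachmentBuilder(imagePath, { name: 'generation.png' });
        await interaction.editReply({ files: [attachment] });
    } finally {
        // "Needs to delete image afterward"
        await fsp.unlink(imagePath);
    }
}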

View File

@@ -0,0 +1,71 @@
// GENERATED CODE -- DO NOT EDIT!
'use strict';
var grpc = require('grpc');
var generation_pb = require('./generation_pb.js');
var google_protobuf_struct_pb = require('google-protobuf/google/protobuf/struct_pb.js');
var tensors_pb = require('./tensors_pb.js');
function serialize_gooseai_Answer(arg) {
if (!(arg instanceof generation_pb.Answer)) {
throw new Error('Expected argument of type gooseai.Answer');
}
return Buffer.from(arg.serializeBinary());
}
function deserialize_gooseai_Answer(buffer_arg) {
return generation_pb.Answer.deserializeBinary(new Uint8Array(buffer_arg));
}
function serialize_gooseai_ChainRequest(arg) {
if (!(arg instanceof generation_pb.ChainRequest)) {
throw new Error('Expected argument of type gooseai.ChainRequest');
}
return Buffer.from(arg.serializeBinary());
}
function deserialize_gooseai_ChainRequest(buffer_arg) {
return generation_pb.ChainRequest.deserializeBinary(new Uint8Array(buffer_arg));
}
function serialize_gooseai_Request(arg) {
if (!(arg instanceof generation_pb.Request)) {
throw new Error('Expected argument of type gooseai.Request');
}
return Buffer.from(arg.serializeBinary());
}
function deserialize_gooseai_Request(buffer_arg) {
return generation_pb.Request.deserializeBinary(new Uint8Array(buffer_arg));
}
//
// gRPC services
//
var GenerationServiceService = exports.GenerationServiceService = {
generate: {
path: '/gooseai.GenerationService/Generate',
requestStream: false,
responseStream: true,
requestType: generation_pb.Request,
responseType: generation_pb.Answer,
requestSerialize: serialize_gooseai_Request,
requestDeserialize: deserialize_gooseai_Request,
responseSerialize: serialize_gooseai_Answer,
responseDeserialize: deserialize_gooseai_Answer,
},
chainGenerate: {
path: '/gooseai.GenerationService/ChainGenerate',
requestStream: false,
responseStream: true,
requestType: generation_pb.ChainRequest,
responseType: generation_pb.Answer,
requestSerialize: serialize_gooseai_ChainRequest,
requestDeserialize: deserialize_gooseai_ChainRequest,
responseSerialize: serialize_gooseai_Answer,
responseDeserialize: deserialize_gooseai_Answer,
},
};
exports.GenerationServiceClient = grpc.makeGenericClientConstructor(GenerationServiceService);
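
This generated stub is the classic grpc flavour of the same service: Generate and ChainGenerate are server-streaming methods, and the constructor returned by makeGenericClientConstructor takes an address plus channel credentials. The controller further down uses the @improbable-eng/grpc-web client instead, but driving this stub directly would look roughly like the sketch below; the ./generation_grpc_pb filename, the host and port, and the pre-built request are assumptions.

// Minimal sketch of calling the generated Node stub directly; not part of this diff.
const grpc = require('grpc');
const generation_pb = require('./generation_pb');
const { GenerationServiceClient } = require('./generation_grpc_pb'); // assumed filename for this stub

const client = new GenerationServiceClient('grpc.stability.ai:443', grpc.credentials.createSsl()); // assumed endpoint
const metadata = new grpc.Metadata();
metadata.set('Authorization', 'Bearer ' + process.env.STABILITY_API_KEY);

const request = new generation_pb.Request(); // normally populated via buildGenerationRequest()
const stream = client.generate(request, metadata); // server-streaming call
stream.on('data', (answer) => console.log('artifacts in answer:', answer.getArtifactsList().length));
stream.on('error', (err) => console.error('generation failed:', err));
stream.on('end', () => console.log('generation stream closed'));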

File diff suppressed because it is too large

View File

@@ -0,0 +1,117 @@
// package: gooseai
// file: generation.proto
var generation_pb = require("./generation_pb");
var grpc = require("@improbable-eng/grpc-web").grpc;
var GenerationService = (function () {
function GenerationService() {}
GenerationService.serviceName = "gooseai.GenerationService";
return GenerationService;
}());
GenerationService.Generate = {
methodName: "Generate",
service: GenerationService,
requestStream: false,
responseStream: true,
requestType: generation_pb.Request,
responseType: generation_pb.Answer
};
GenerationService.ChainGenerate = {
methodName: "ChainGenerate",
service: GenerationService,
requestStream: false,
responseStream: true,
requestType: generation_pb.ChainRequest,
responseType: generation_pb.Answer
};
exports.GenerationService = GenerationService;
function GenerationServiceClient(serviceHost, options) {
this.serviceHost = serviceHost;
this.options = options || {};
}
GenerationServiceClient.prototype.generate = function generate(requestMessage, metadata) {
var listeners = {
data: [],
end: [],
status: []
};
var client = grpc.invoke(GenerationService.Generate, {
request: requestMessage,
host: this.serviceHost,
metadata: metadata,
transport: this.options.transport,
debug: this.options.debug,
onMessage: function (responseMessage) {
listeners.data.forEach(function (handler) {
handler(responseMessage);
});
},
onEnd: function (status, statusMessage, trailers) {
listeners.status.forEach(function (handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners.end.forEach(function (handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners = null;
}
});
return {
on: function (type, handler) {
listeners[type].push(handler);
return this;
},
cancel: function () {
listeners = null;
client.close();
}
};
};
GenerationServiceClient.prototype.chainGenerate = function chainGenerate(requestMessage, metadata) {
var listeners = {
data: [],
end: [],
status: []
};
var client = grpc.invoke(GenerationService.ChainGenerate, {
request: requestMessage,
host: this.serviceHost,
metadata: metadata,
transport: this.options.transport,
debug: this.options.debug,
onMessage: function (responseMessage) {
listeners.data.forEach(function (handler) {
handler(responseMessage);
});
},
onEnd: function (status, statusMessage, trailers) {
listeners.status.forEach(function (handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners.end.forEach(function (handler) {
handler({ code: status, details: statusMessage, metadata: trailers });
});
listeners = null;
}
});
return {
on: function (type, handler) {
listeners[type].push(handler);
return this;
},
cancel: function () {
listeners = null;
client.close();
}
};
};
exports.GenerationServiceClient = GenerationServiceClient;

File diff suppressed because it is too large

View File

@@ -0,0 +1,60 @@
// Debug
const { DebugBuilder } = require("../utilities/debugBuilder");
const log = new DebugBuilder("server", "stabilityController");
// Modules
const Generation = require("./stabilityAi/generation_pb");
const { GenerationServiceClient } = require("./stabilityAi/generation_pb_service");
const { grpc } = require("@improbable-eng/grpc-web");
const GRPCWeb = grpc;
const { NodeHttpTransport } = require("@improbable-eng/grpc-web-node-http-transport");
const fs = require("fs");
const {
buildGenerationRequest,
executeGenerationRequest,
onGenerationComplete,
} = require("../utilities/stabilityHelpers");
// Set Global Color for this controller
exports.STABILITY_COLOR = 0xeb34b7;
// This is a NodeJS-specific requirement - browser implementations should omit this line.
GRPCWeb.setDefaultTransport(NodeHttpTransport());
// Authenticate using your API key, don't commit your key to a public repository!
const metadata = new GRPCWeb.Metadata();
metadata.set("Authorization", "Bearer " + process.env.STABILITY_API_KEY);
// Create a generation client to use with all future requests
const stabilityClient = new GenerationServiceClient("https://grpc.stability.ai", {});
exports.submitImageGenerationTransaction = async () => {
const request = buildGenerationRequest("stable-diffusion-512-v2-1", {
type: "text-to-image",
prompts: [
{
text: "A dream of a distant galaxy, by Caspar David Friedrich, matte painting trending on artstation HQ",
},
],
width: 512,
height: 512,
samples: 1,
cfgScale: 13,
steps: 10,
sampler: Generation.DiffusionSampler.SAMPLER_K_DPMPP_2M,
});
log.DEBUG("Stability request: ", request, metadata, stabilityClient);
executeGenerationRequest(stabilityClient, request, metadata)
.then((response) => {
log.DEBUG("Stability Generation response: ". response)
onGenerationComplete(response);
return;
})
.catch((error) => {
log.ERROR("Failed to make text-to-image request:", error);
});
};
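
As committed, the transaction hard-codes the example prompt and sampler settings from the Stability sample and never hands anything back to the /stability command, which is why the command still answers with 'Pong.'. A minimal sketch of a parameterised variant, assuming the prompt text is passed in from the command and that onGenerationComplete is reworked to return the paths of the images it writes:

// Hypothetical variant, not part of this diff: take the prompt from the caller and
// surface the generated file paths instead of only logging them.
exports.submitImageGenerationTransaction = async (promptText) => {
    const request = buildGenerationRequest("stable-diffusion-512-v2-1", {
        type: "text-to-image",
        prompts: [{ text: promptText }],
        width: 512,
        height: 512,
        samples: 1,
        cfgScale: 13,
        steps: 10,
        sampler: Generation.DiffusionSampler.SAMPLER_K_DPMPP_2M,
    });
    const response = await executeGenerationRequest(stabilityClient, request, metadata);
    // Assumes onGenerationComplete is changed to return the written file paths.
    return onGenerationComplete(response);
};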

View File

@@ -140,7 +140,7 @@ exports.updateFeeds = (client) => {
if (!sendResults) {
log.ERROR("No sending results from sending a post: ", sendResults, existingRecord, post);
return recordReject();
return recordReject("No sending results from sending a post");
}
log.DEBUG("Saving post to database: ", sendResults, post.title, source.channel_id);
@@ -150,7 +150,7 @@ exports.updateFeeds = (client) => {
if (saveResults) {
log.DEBUG("Saved results: ", saveResults);
return recordResolve();
return recordResolve("Saved results", saveResults);
}
});
})
@@ -165,7 +165,7 @@ exports.updateFeeds = (client) => {
}
Promise.all(recordPromiseArray).then((values) => {
log.DEBUG("All posts finished for: ", source.title, values);
return resolve();
return resolve(source.title);
});
});
}))
@@ -174,9 +174,8 @@ exports.updateFeeds = (client) => {
// Wait for all connections to finish then close the temp connections
Promise.all(sourcePromiseArray).then((values) => {
log.DEBUG("Closing temp connections: ", values);
tempFeedStorage.closeConnection();
tempPostStorage.closeConnection();
log.DEBUG("All sources finished, closing temp connections: ", values);
tempConnection.end();
});
});
}
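
The feed-update changes are small but deliberate: the record and source promises now resolve or reject with values (the save results, a reason string, source.title), so the Promise.all logging reports what actually finished, and the two per-storage close calls collapse into a single tempConnection.end(). A tiny self-contained illustration, with hypothetical source titles, of what resolving with a value buys:

// Illustration only: with bare resolve() the values array below would be
// [undefined, undefined]; resolving with source.title makes the DEBUG output meaningful.
const sourcePromiseArray = [
    Promise.resolve("Hacker News"),  // hypothetical source title
    Promise.resolve("Ars Technica"), // hypothetical source title
];
Promise.all(sourcePromiseArray).then((values) => {
    console.log("All sources finished, closing temp connections: ", values);
});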

View File

@@ -27,7 +27,12 @@
"openai": "~3.1.0",
"parse-files": "~0.1.1",
"rss-parser": "~3.12.0",
"user-agents": "~1.0.1303"
"user-agents": "~1.0.1303",
"@improbable-eng/grpc-web": "~0.15.0",
"@improbable-eng/grpc-web-node-http-transport": "~0.15.0",
"google-protobuf": "~3.21.2",
"grpc": "~1.24.11",
"typescript": "~4.9.5"
},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",

View File

@@ -0,0 +1,263 @@
// Debug
const { DebugBuilder } = require("../utilities/debugBuilder");
const log = new DebugBuilder("server", "stabilityController");
// Modules
const Generation = require("../controllers/stabilityAi/generation_pb");
const { GenerationServiceClient } = require("../controllers/stabilityAi/generation_pb_service");
const fs = require("fs");
const { grpc } = require("@improbable-eng/grpc-web");
const GRPCWeb = grpc;
const path = require("node:path");
function isImageArtifact(artifact) {
log.DEBUG("Checking if image is an artifact", artifact);
if (artifact.getType() !== Generation.ArtifactType.ARTIFACT_IMAGE) return false;
if (artifact.getFinishReason() !== Generation.FinishReason.NULL) return false;
return artifact.hasBinary();
}
exports.isImageArtifact = isImageArtifact;
function isNSFWFilteredArtifact(artifact) {
if (artifact.getType() !== Generation.ArtifactType.ARTIFACT_IMAGE) return false;
if (artifact.getFinishReason() !== Generation.FinishReason.FILTER) return false;
return true;
}
exports.isNSFWFilteredArtifact = isNSFWFilteredArtifact;
/** Builds a generation request for a specified engine with the specified parameters. */
function buildGenerationRequest(engineID, params) {
if (params.type === "upscaling") {
const request = new Generation.Request()
request.setEngineId(engineID)
request.setRequestedType(Generation.ArtifactType.ARTIFACT_IMAGE)
request.setClassifier(new Generation.ClassifierParameters())
const imageParams = new Generation.ImageParameters()
if ("width" in params && !!params.width) {
imageParams.setWidth(params.width)
} else if ("height" in params && !!params.height) {
imageParams.setHeight(params.height)
}
request.setImage(imageParams)
request.addPrompt(createInitImagePrompt(params.initImage))
return request
}
const imageParams = new Generation.ImageParameters()
if (params.type === "text-to-image") {
params.width && imageParams.setWidth(params.width)
params.height && imageParams.setHeight(params.height)
}
// Set the number of images to generate (Default 1)
params.samples && imageParams.setSamples(params.samples)
// Set the steps (Default 30)
// Represents the amount of inference steps performed on image generation.
params.steps && imageParams.setSteps(params.steps)
// Set the seed (Default 0)
// Including a seed will cause the results to be deterministic.
// Omitting the seed or setting it to `0` will do the opposite.
params.seed && imageParams.addSeed(params.seed)
// Set the sampler (Default 'automatic')
// Omitting this value enables 'automatic' mode where we choose the best sampler for you based
// on the current payload. For example, since CLIP guidance only works on ancestral samplers,
// when CLIP guidance is enabled, we will automatically choose an ancestral sampler for you.
if (params.sampler) {
const transformType = new Generation.TransformType()
transformType.setDiffusion(params.sampler)
imageParams.setTransform(transformType)
}
// Set the Engine
// At the time of writing, valid engines are:
// stable-diffusion-v1,
// stable-diffusion-v1-5
// stable-diffusion-512-v2-0
// stable-diffusion-768-v2-0
// stable-diffusion-512-v2-1
// stable-diffusion-768-v2-1
// stable-inpainting-v1-0
// stable-inpainting-512-v2-0
// esrgan-v1-x2plus
const request = new Generation.Request()
request.setEngineId(engineID)
request.setRequestedType(Generation.ArtifactType.ARTIFACT_IMAGE)
request.setClassifier(new Generation.ClassifierParameters())
// Set the CFG scale (Default 7)
// Influences how strongly your generation is guided to match your prompt. Higher values match closer.
const samplerParams = new Generation.SamplerParameters()
params.cfgScale && samplerParams.setCfgScale(params.cfgScale)
const stepParams = new Generation.StepParameter()
stepParams.setScaledStep(0)
stepParams.setSampler(samplerParams)
const scheduleParams = new Generation.ScheduleParameters()
if (params.type === "image-to-image") {
// If we're doing image-to-image generation then we need to configure
// how much influence the initial image has on the diffusion process
scheduleParams.setStart(params.stepScheduleStart)
if (params.stepScheduleEnd) {
scheduleParams.setEnd(params.stepScheduleEnd)
}
} else if (params.type === "image-to-image-masking") {
// Step schedule start is always 1 for masking requests
scheduleParams.setStart(1)
}
stepParams.setSchedule(scheduleParams)
// Set CLIP Guidance (Default: None)
// NOTE: This only works with ancestral samplers. Omitting the sampler parameter above will ensure
// that we automatically choose an ancestral sampler for you when CLIP guidance is enabled.
if (params.clipGuidancePreset) {
const guidanceParameters = new Generation.GuidanceParameters()
guidanceParameters.setGuidancePreset(params.clipGuidancePreset)
stepParams.setGuidance(guidanceParameters)
}
imageParams.addParameters(stepParams)
request.setImage(imageParams)
params.prompts.forEach(textPrompt => {
const prompt = new Generation.Prompt()
prompt.setText(textPrompt.text)
// If provided, set the prompt's weight (use negative values for negative weighting)
if (textPrompt.weight) {
const promptParameters = new Generation.PromptParameters()
promptParameters.setWeight(textPrompt.weight)
prompt.setParameters(promptParameters)
}
request.addPrompt(prompt)
})
// Add image prompts if we're doing some kind of image-to-image generation or upscaling
if (params.type === "image-to-image") {
request.addPrompt(createInitImagePrompt(params.initImage))
} else if (params.type === "image-to-image-masking") {
request.addPrompt(createInitImagePrompt(params.initImage))
request.addPrompt(createMaskImagePrompt(params.maskImage))
}
return request
}
exports.buildGenerationRequest = buildGenerationRequest;
function createInitImagePrompt(imageBinary) {
const initImageArtifact = new Generation.Artifact()
initImageArtifact.setBinary(imageBinary)
initImageArtifact.setType(Generation.ArtifactType.ARTIFACT_IMAGE)
const initImageParameters = new Generation.PromptParameters()
initImageParameters.setInit(true)
const initImagePrompt = new Generation.Prompt()
initImagePrompt.setParameters(initImageParameters)
initImagePrompt.setArtifact(initImageArtifact)
return initImagePrompt
}
function createMaskImagePrompt(imageBinary) {
const maskImageArtifact = new Generation.Artifact()
maskImageArtifact.setBinary(imageBinary)
maskImageArtifact.setType(Generation.ArtifactType.ARTIFACT_MASK)
const maskImagePrompt = new Generation.Prompt()
maskImagePrompt.setArtifact(maskImageArtifact)
return maskImagePrompt
}
/** Executes a GenerationRequest, abstracting the gRPC streaming result behind a Promise */
async function executeGenerationRequest(
generationClient,
request,
metadata
) {
try {
const stream = generationClient.generate(request, metadata)
const answers = await new Promise((resolve, reject) => {
const answers = new Array()
stream.on("data", data => answers.push(data))
stream.on("end", () => resolve(answers))
stream.on("status", status => {
if (status.code === 0) return
reject(status.details)
})
})
return extractArtifacts(answers)
} catch (err) {
return err instanceof Error ? err : new Error(JSON.stringify(err))
}
}
exports.executeGenerationRequest = executeGenerationRequest;
function extractArtifacts(answers) {
const imageArtifacts = new Array()
const filteredArtifacts = new Array()
for (const answer of answers) {
for (const artifact of answer.getArtifactsList()) {
if (isImageArtifact(artifact)) {
imageArtifacts.push(artifact)
} else if (isNSFWFilteredArtifact(artifact)) {
filteredArtifacts.push(artifact)
}
}
}
return { filteredArtifacts, imageArtifacts }
}
/** Generation completion handler - replace this with your own logic */
function onGenerationComplete(response) {
if (response instanceof Error) {
log.ERROR("Generation failed", response)
throw response
}
log.DEBUG(
`${response.imageArtifacts.length} image${
response.imageArtifacts.length > 1 ? "s" : ""
} were successfully generated.`
)
// Do something with NSFW filtered artifacts
if (response.filteredArtifacts.length > 0) {
log.DEBUG(
`${response.filteredArtifacts.length} artifact` +
`${response.filteredArtifacts.length > 1 ? "s" : ""}` +
` were filtered by the NSFW classifier and need to be retried.`
)
}
// Do something with the successful image artifacts
response.imageArtifacts.forEach(artifact => {
try {
const writePath = path.resolve(__dirname, `../.generations/image-${artifact.getSeed()}.png`)
fs.writeFileSync(writePath, Buffer.from(artifact.getBinary_asU8()))
} catch (error) {
log.ERROR("Failed to write resulting image to disk", error)
}
})
// For browser implementations: you could use the `artifact.getBinary_asB64()` method to get a
// base64 encoded string and then create a data URL from that and display it in an <img> tag.
}
exports.onGenerationComplete = onGenerationComplete;
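
Only the text-to-image path of buildGenerationRequest is exercised by this commit, but the helper also understands image-to-image, masking, and upscaling requests. A minimal, self-contained sketch of an image-to-image call; the require paths, the init-image file, and the schedule values are assumptions.

// Hypothetical image-to-image usage of the helpers above; not part of this diff.
const { grpc: GRPCWeb } = require("@improbable-eng/grpc-web");
const { NodeHttpTransport } = require("@improbable-eng/grpc-web-node-http-transport");
const { GenerationServiceClient } = require("../controllers/stabilityAi/generation_pb_service"); // assumed path
const { buildGenerationRequest, executeGenerationRequest, onGenerationComplete } = require("./stabilityHelpers"); // assumed path
const fs = require("fs");

GRPCWeb.setDefaultTransport(NodeHttpTransport());
const metadata = new GRPCWeb.Metadata();
metadata.set("Authorization", "Bearer " + process.env.STABILITY_API_KEY);
const stabilityClient = new GenerationServiceClient("https://grpc.stability.ai", {});

const initImage = fs.readFileSync("./init.png"); // assumed input image
const request = buildGenerationRequest("stable-diffusion-512-v2-1", {
    type: "image-to-image",
    prompts: [{ text: "rework the photo as a matte painting", weight: 1 }],
    initImage,              // wrapped by createInitImagePrompt() inside the helper
    stepScheduleStart: 0.6, // how strongly the init image constrains the diffusion
    samples: 1,
    cfgScale: 7,
    steps: 30,
});
executeGenerationRequest(stabilityClient, request, metadata)
    .then(onGenerationComplete)
    .catch((error) => console.error("Failed to make image-to-image request:", error));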