49 changes: 49 additions & 0 deletions .github/workflows/backend.yml
@@ -993,6 +993,55 @@ jobs:
backend: "kitten-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# neutts
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-neutts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-neutts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-neutts'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-neutts'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
backend-jobs-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
strategy:
Expand Down
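Each of the four new matrix entries produces a container image whose tag is the release channel (`latest` for releases, `master` for development builds) followed by the entry's `tag-suffix`; the gallery entries added to `backend/index.yaml` further down reference exactly these tags. A minimal sketch of the mapping (the helper below is illustrative, not part of the workflow):

```python
# Illustrative sketch: how the matrix tag-suffixes above turn into the image
# references registered in backend/index.yaml below.
SUFFIXES = [
    "-cpu-neutts",                 # CPU-only image
    "-gpu-nvidia-cuda-12-neutts",  # CUDA 12 image
    "-gpu-rocm-hipblas-neutts",    # ROCm/hipBLAS image
    "-nvidia-l4t-arm64-neutts",    # Jetson (L4T) arm64 image
]

def image_refs(channel: str) -> list[str]:
    """Return the fully qualified image references for one release channel."""
    return [f"quay.io/go-skynet/local-ai-backends:{channel}{suffix}" for suffix in SUFFIXES]

print(image_refs("latest"))  # stable gallery entries, e.g. ...:latest-cpu-neutts
print(image_refs("master"))  # the *-development gallery entries
```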
9 changes: 9 additions & 0 deletions Makefile
@@ -376,6 +376,9 @@ backends/llama-cpp-darwin: build
	bash ./scripts/build/llama-cpp-darwin.sh
	./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"

backends/neutts: docker-build-neutts docker-save-neutts build
	./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"

build-darwin-python-backend: build
	bash ./scripts/build/python-darwin.sh

@@ -432,6 +435,12 @@ docker-save-kitten-tts: backend-images
docker-save-chatterbox: backend-images
	docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar

docker-build-neutts:
	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend

docker-save-neutts: backend-images
	docker save local-ai-backend:neutts -o backend-images/neutts.tar

docker-build-kokoro:
	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
3 changes: 2 additions & 1 deletion README.md
@@ -269,6 +269,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12, ROCm, CPU |

### Image & Video Generation
| Backend | Description | Acceleration Support |
@@ -290,7 +291,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
2 changes: 1 addition & 1 deletion backend/Dockerfile.python
@@ -28,7 +28,7 @@ RUN apt-get update && \
    curl python3-pip \
    python-is-python3 \
    python3-dev llvm \
    python3-venv make && \
    python3-venv make cmake && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --upgrade pip
62 changes: 62 additions & 0 deletions backend/index.yaml
@@ -93,7 +93,7 @@
  capabilities:
    nvidia: "cuda12-rfdetr"
    intel: "intel-rfdetr"
    #amd: "rocm-rfdetr"
    nvidia-l4t: "nvidia-l4t-arm64-rfdetr"
    default: "cpu-rfdetr"
- &vllm
@@ -427,6 +427,68 @@
    - text-to-speech
    - TTS
  license: apache-2.0
- &neutts
  name: "neutts"
  urls:
    - https://github.com/neuphonic/neutts-air
  description: |
    NeuTTS Air is the world’s first super-realistic, on-device, TTS speech language model with instant voice cloning. Built off a 0.5B LLM backbone, NeuTTS Air brings natural-sounding speech, real-time performance, built-in security and speaker cloning to your local device - unlocking a new category of embedded voice agents, assistants, toys, and compliance-safe apps.
  tags:
    - text-to-speech
    - TTS
  license: apache-2.0
  capabilities:
    default: "cpu-neutts"
    nvidia: "cuda12-neutts"
    amd: "rocm-neutts"
    nvidia-l4t: "nvidia-l4t-neutts"
- !!merge <<: *neutts
  name: "neutts-development"
  capabilities:
    default: "cpu-neutts-development"
    nvidia: "cuda12-neutts-development"
    amd: "rocm-neutts-development"
    nvidia-l4t: "nvidia-l4t-neutts-development"
- !!merge <<: *neutts
  name: "cpu-neutts"
  uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-neutts"
  mirrors:
    - localai/localai-backends:latest-cpu-neutts
- !!merge <<: *neutts
  name: "cuda12-neutts"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-neutts"
  mirrors:
    - localai/localai-backends:latest-gpu-nvidia-cuda-12-neutts
- !!merge <<: *neutts
  name: "rocm-neutts"
  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-neutts"
  mirrors:
    - localai/localai-backends:latest-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
  name: "nvidia-l4t-neutts"
  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-neutts"
  mirrors:
    - localai/localai-backends:latest-nvidia-l4t-arm64-neutts
- !!merge <<: *neutts
  name: "cpu-neutts-development"
  uri: "quay.io/go-skynet/local-ai-backends:master-cpu-neutts"
  mirrors:
    - localai/localai-backends:master-cpu-neutts
- !!merge <<: *neutts
  name: "cuda12-neutts-development"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-neutts"
  mirrors:
    - localai/localai-backends:master-gpu-nvidia-cuda-12-neutts
- !!merge <<: *neutts
  name: "rocm-neutts-development"
  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-neutts"
  mirrors:
    - localai/localai-backends:master-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
  name: "nvidia-l4t-neutts-development"
  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-neutts"
  mirrors:
    - localai/localai-backends:master-nvidia-l4t-arm64-neutts
- !!merge <<: *mlx
  name: "mlx-development"
  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx"
@@ -791,7 +853,7 @@
  capabilities:
    nvidia: "cuda12-rfdetr-development"
    intel: "intel-rfdetr-development"
    #amd: "rocm-rfdetr-development"
    nvidia-l4t: "nvidia-l4t-arm64-rfdetr-development"
    default: "cpu-rfdetr-development"
- !!merge <<: *rfdetr
- !!merge <<: *rfdetr
Expand Down Expand Up @@ -1018,7 +1080,7 @@
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-diffusers
## exllama2

Check warning on line 1083 in backend/index.yaml

View workflow job for this annotation

GitHub Actions / Yamllint

1083:3 [comments-indentation] comment not indented like content
- !!merge <<: *exllama2
name: "exllama2-development"
capabilities:
23 changes: 23 additions & 0 deletions backend/python/neutts/Makefile
@@ -0,0 +1,23 @@
.PHONY: neutts
neutts:
	bash install.sh

.PHONY: run
run: neutts
	@echo "Running neutts..."
	bash run.sh
	@echo "neutts run."

.PHONY: test
test: neutts
	@echo "Testing neutts..."
	bash test.sh
	@echo "neutts tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
162 changes: 162 additions & 0 deletions backend/python/neutts/backend.py
@@ -0,0 +1,162 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for NeuTTSAir
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os

import backend_pb2
import backend_pb2_grpc
import torch
from neuttsair.neutts import NeuTTSAir
import soundfile as sf

import grpc


def is_float(s):
    """Check if a string can be converted to float."""
    try:
        float(s)
        return True
    except ValueError:
        return False


def is_int(s):
    """Check if a string can be converted to int."""
    try:
        int(s)
        return True
    except ValueError:
        return False


_ONE_DAY_IN_SECONDS = 60 * 60 * 24

# If PYTHON_GRPC_MAX_WORKERS is specified in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))


class BackendServicer(backend_pb2_grpc.BackendServicer):
    """
    BackendServicer is the class that implements the gRPC service
    """
    def Health(self, request, context):
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        # Select the device: CUDA if available, overridden by MPS on Apple hardware
        if torch.cuda.is_available():
            print("CUDA is available", file=sys.stderr)
            device = "cuda"
        else:
            print("CUDA is not available", file=sys.stderr)
            device = "cpu"
        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
        if mps_available:
            device = "mps"
        if not torch.cuda.is_available() and request.CUDA:
            return backend_pb2.Result(success=False, message="CUDA is not available")

        options = request.Options

        self.options = {}
        self.ref_text = None

        # The options are a list of strings in the form optname:optvalue.
        # We store them all in a dict so we can use them later when
        # generating the audio.
        for opt in options:
            if ":" not in opt:
                continue
            # split only on the first ":" so option values may contain colons
            key, value = opt.split(":", 1)
            # if the value is a number, convert it to the appropriate type
            if is_float(value):
                value = float(value)
            elif is_int(value):
                value = int(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            self.options[key] = value

        codec_repo = "neuphonic/neucodec"
        if "codec_repo" in self.options:
            codec_repo = self.options["codec_repo"]
            del self.options["codec_repo"]
        if "ref_text" in self.options:
            self.ref_text = self.options["ref_text"]
            del self.options["ref_text"]

        self.AudioPath = None

        if os.path.isabs(request.AudioPath):
            self.AudioPath = request.AudioPath
        elif request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
            # resolve the reference audio path relative to the model file's directory
            modelFileBase = os.path.dirname(request.ModelFile)
            self.AudioPath = os.path.join(modelFileBase, request.AudioPath)

        try:
            print("Preparing models, please wait", file=sys.stderr)
            self.model = NeuTTSAir(backbone_repo=request.Model, backbone_device=device, codec_repo=codec_repo, codec_device=device)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def TTS(self, request, context):
        try:
            # encode the reference voice once, then condition generation on it
            ref_codes = self.model.encode_reference(self.AudioPath)
            wav = self.model.infer(request.text, ref_codes, self.ref_text)
            # NeuTTS Air produces 24 kHz audio
            sf.write(request.dst, wav, 24000)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(success=True)


def serve(address):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
                         options=[
                             ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
                         ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    # Define the signal handler function
    def signal_handler(sig, frame):
        print("Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    # Set the signal handlers for SIGINT and SIGTERM
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument(
        "--addr", default="localhost:50051", help="The address to bind the server to."
    )
    args = parser.parse_args()

    serve(args.addr)
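For reference, here is a minimal client sketch exercising the two RPCs above. It assumes the `backend_pb2`/`backend_pb2_grpc` stubs generated from LocalAI's `backend.proto` (the field names match those read by the server code), a server listening on `localhost:50051`, and placeholder model/voice paths:

```python
# Minimal client sketch (assumptions: generated stubs on the path, the server
# above running locally, and placeholder model/voice paths).
import grpc
import backend_pb2
import backend_pb2_grpc

channel = grpc.insecure_channel("localhost:50051")
stub = backend_pb2_grpc.BackendStub(channel)

# Load the backbone; AudioPath points at the reference voice to clone, and the
# reference transcript travels through the generic Options list as ref_text.
res = stub.LoadModel(backend_pb2.ModelOptions(
    Model="neuphonic/neutts-air",                # backbone repo (placeholder)
    AudioPath="/models/ref.wav",                 # reference speaker sample (placeholder)
    Options=["ref_text:Hi, this is my voice."],  # transcript of ref.wav
))
assert res.success, res.message

# Synthesize; the server writes a 24 kHz WAV to dst.
stub.TTS(backend_pb2.TTSRequest(text="Hello from NeuTTS Air!", dst="/tmp/out.wav"))
```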
33 changes: 33 additions & 0 deletions backend/python/neutts/install.sh
@@ -0,0 +1,33 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# This is here because the Intel pip index is broken and returns 200 status codes for every
# package name; it just doesn't return any package links. This makes uv think that the package
# exists in the Intel pip index, and by default it stops looking at other pip indexes once it
# finds a match. We need uv to continue falling through to the default PyPI index for the
# packages the Intel index does not actually carry.
# Note that --upgrade actually allows us to *downgrade* torch to the version provided in the
# Intel pip index.
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

if [ "x${BUILD_TYPE}" == "xcublas" ] || [ "x${BUILD_TYPE}" == "xl4t" ]; then
    export CMAKE_ARGS="-DGGML_CUDA=on"
fi

if [ "x${BUILD_TYPE}" == "xhipblas" ]; then
    export CMAKE_ARGS="-DGGML_HIPBLAS=on"
fi

EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"

# Vendor the NeuTTS Air package so backend.py can import neuttsair
git clone https://github.com/neuphonic/neutts-air neutts-air
cp -rfv neutts-air/neuttsair ./

installRequirements
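Since the script vendors the `neuttsair` package by copying it next to `backend.py`, a quick smoke test can mirror the exact calls the backend makes. This is a hypothetical check, assuming it runs inside the backend's virtualenv after `install.sh` and that `ref.wav` and its transcript are supplied by the user:

```python
# Hypothetical smoke test: mirrors the calls backend.py makes against the
# vendored package (run inside the backend venv; ref.wav is a user-supplied sample).
import soundfile as sf
from neuttsair.neutts import NeuTTSAir

tts = NeuTTSAir(
    backbone_repo="neuphonic/neutts-air",  # placeholder backbone repo
    backbone_device="cpu",
    codec_repo="neuphonic/neucodec",       # default codec used by backend.py
    codec_device="cpu",
)
ref_codes = tts.encode_reference("ref.wav")             # reference speaker sample
wav = tts.infer("Testing one two three.", ref_codes, "transcript of ref.wav")
sf.write("out.wav", wav, 24000)                         # backend writes 24 kHz audio
```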
2 changes: 2 additions & 0 deletions backend/python/neutts/requirements-after.txt
@@ -0,0 +1,2 @@
datasets==4.1.1
torchtune==0.6.1