mirror of
https://github.com/khoj-ai/khoj.git
synced 2025-02-17 08:04:21 +00:00
Use MPS on Apple Mac M1 to GPU accelerate Encode, Query Performance
- Note: Support for MPS in PyTorch is currently only in v1.13.0 nightly builds.
- Users will have to wait for PyTorch MPS support to land in stable builds.
- Until then, the code can be tweaked and tested to make use of GPU acceleration on newer Macs.
This commit is contained in:
parent
7de9c58a1c
commit
acc9091260
1 changed file with 12 additions and 2 deletions
|
@ -1,3 +1,5 @@
|
|||
# Standard Packages
|
||||
from packaging import version
|
||||
# External Packages
|
||||
import torch
|
||||
from pathlib import Path
|
||||
|
@ -12,7 +14,15 @@ model = SearchModels()
|
|||
# Global application state for Khoj, shared across modules.
processor_config = ProcessorConfigModel()
config_file: Path = ""  # NOTE(review): annotated Path but initialized to "" — confirm callers expect a falsy sentinel
verbose: int = 0
# Prefer the first CUDA GPU when present; otherwise run on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
host: str = None  # NOTE(review): None despite str annotation — presumably populated at startup
port: int = None
cli_args = None
||||
|
||||
def _preferred_device() -> torch.device:
    """Return the best available compute device: CUDA GPU, Apple MPS, else CPU."""
    if torch.cuda.is_available():
        # NVIDIA GPU acceleration.
        return torch.device("cuda:0")
    # The MPS backend only exists in PyTorch >= 1.13 (nightly builds at the time
    # of writing); the version guard short-circuits before touching torch.backends.mps.
    if version.parse(torch.__version__) >= version.parse("1.13.0.dev") and torch.backends.mps.is_available():
        # Apple Silicon (M1) Metal acceleration.
        return torch.device("mps")
    return torch.device("cpu")

device = _preferred_device()
|
||||
|
|
Loading…
Add table
Reference in a new issue