import torch

from depth_anything_v2.dpt import DepthAnythingV2


def model_start(model_path, encoder='vitl'):
    # Pick the best available device: CUDA GPU, Apple Silicon (MPS), or CPU.
    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

    # Configurations for the available Depth Anything V2 encoders.
    model_configs = {
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
    }

    # Build the model with the config matching the chosen encoder, load the
    # checkpoint weights onto the CPU, then move the model to DEVICE for inference.
    depth_anything = DepthAnythingV2(**model_configs[encoder])
    depth_anything.load_state_dict(
        torch.load(f'{model_path}', map_location='cpu'))
    depth_anything = depth_anything.to(DEVICE).eval()
    print("model loaded")
    return depth_anything
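

# Minimal usage sketch (assumptions: a local ViT-L checkpoint path and an
# example image path; adjust both to your setup). infer_image() is the
# single-image inference helper provided by the Depth Anything V2 repo.
if __name__ == '__main__':
    import cv2

    model = model_start('checkpoints/depth_anything_v2_vitl.pth')
    raw_img = cv2.imread('example.jpg')    # HxWx3 BGR image read by OpenCV
    depth = model.infer_image(raw_img)     # HxW numpy array of raw depth values
    print(depth.shape, depth.min(), depth.max())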