@@ -82,6 +82,55 @@ export const bm25s = (model: ModelData): string[] => [
retriever = BM25HF.load_from_hub("${model.id}")`,
];

+export const depth_anything_v2 = (model: ModelData): string[] => {
+	let encoder: string;
+	let features: string;
+	let out_channels: string;
+
+	encoder = "<ENCODER>";
+	features = "<NUMBER_OF_FEATURES>";
+	out_channels = "<OUT_CHANNELS>";
+
+	if (model.id === "depth-anything/Depth-Anything-V2-Small") {
+		encoder = "vits";
+		features = "64";
+		out_channels = "[48, 96, 192, 384]";
+	} else if (model.id === "depth-anything/Depth-Anything-V2-Base") {
+		encoder = "vitb";
+		features = "128";
+		out_channels = "[96, 192, 384, 768]";
+	} else if (model.id === "depth-anything/Depth-Anything-V2-Large") {
+		encoder = "vitl";
+		features = "256";
+		out_channels = "[256, 512, 1024, 1024]";
+	}
+
+	return [
+		`
+# Install from https://github.com/DepthAnything/Depth-Anything-V2
+
+# Load the model and infer depth from an image
+import cv2
+import torch
+
+from depth_anything_v2.dpt import DepthAnythingV2
+from huggingface_hub import hf_hub_download
+
+# instantiate the model
+model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels})
+
+# load the weights
+filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model")
+state_dict = torch.load(filepath, map_location="cpu")
+model.load_state_dict(state_dict)
+model.eval()  # set to inference mode
+
+raw_img = cv2.imread("your/image/path")
+depth = model.infer_image(raw_img)  # HxW raw depth map in numpy
+`,
+	];
+};
+
 const diffusers_default = (model: ModelData) => [
 	`from diffusers import DiffusionPipeline
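For reference, a minimal driver sketch showing how the new generator could be exercised. This is hypothetical and not part of the PR: the import paths are assumptions, and `ModelData` is narrowed to just its `id` field, which is all this code path reads but is a simplification of the real interface.

// Hypothetical usage sketch (not in the PR). The module paths and the
// narrowing of ModelData to just `id` are assumptions for illustration.
import type { ModelData } from "./model-data";
import { depth_anything_v2 } from "./model-libraries-snippets";

const model = { id: "depth-anything/Depth-Anything-V2-Small" } as ModelData;

// The generator returns a one-element array holding the Python snippet;
// encoder/features/out_channels are resolved by the if/else chain above.
const [snippet] = depth_anything_v2(model);
console.log(snippet);

For ids that match none of the three branches (e.g. fine-tuned checkpoints), the returned snippet keeps the "<ENCODER>", "<NUMBER_OF_FEATURES>", and "<OUT_CHANNELS>" placeholders for the user to fill in.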