Skip to content

Commit

Permalink
feat: preload textual model
Browse files Browse the repository at this point in the history
  • Loading branch information
martabal committed Sep 25, 2024
1 parent d34d631 commit 59300d2
Show file tree
Hide file tree
Showing 10 changed files with 59 additions and 59 deletions.
13 changes: 9 additions & 4 deletions machine-learning/app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@
InferenceEntries,
InferenceEntry,
InferenceResponse,
LoadModelEntry,
MessageResponse,
ModelFormat,
ModelIdentity,
Expand Down Expand Up @@ -125,17 +124,16 @@ def get_entries(entries: str = Form()) -> InferenceEntries:
raise HTTPException(422, "Invalid request format.")


def get_entry(entries: str = Form()) -> LoadModelEntry:
def get_entry(entries: str = Form()) -> InferenceEntry:
try:
request: PipelineRequest = orjson.loads(entries)
for task, types in request.items():
for type, entry in types.items():
parsed: LoadModelEntry = {
parsed: InferenceEntry = {
"name": entry["modelName"],
"task": task,
"type": type,
"options": entry.get("options", {}),
"ttl": entry["ttl"] if "ttl" in entry else settings.ttl,
}
return parsed
except (orjson.JSONDecodeError, ValidationError, KeyError, AttributeError) as e:
Expand Down Expand Up @@ -163,6 +161,13 @@ async def load_model(entry: InferenceEntry = Depends(get_entry)) -> None:
return Response(status_code=200)


@app.post("/unload", response_model=TextResponse)
async def unload_model(entry: InferenceEntry = Depends(get_entry)) -> Response:
    """Evict a model from the in-memory model cache.

    The entry (parsed from the request form by ``get_entry``) identifies the
    model by name, type, and task. Always responds with HTTP 200, regardless
    of whether the model was actually cached.
    """
    # NOTE(review): response_model=TextResponse does not match the empty
    # 200 response actually returned — confirm the intended OpenAPI schema.
    await model_cache.unload(entry["name"], entry["type"], entry["task"])
    return Response(status_code=200)


@app.post("/predict", dependencies=[Depends(update_state)])
async def predict(
entries: InferenceEntries = Depends(get_entries),
Expand Down
7 changes: 7 additions & 0 deletions machine-learning/app/models/cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,3 +58,10 @@ async def get_profiling(self) -> dict[str, float] | None:
async def revalidate(self, key: str, ttl: int | None) -> None:
if ttl is not None and key in self.cache._handlers:
await self.cache.expire(key, ttl)

async def unload(self, model_name: str, model_type: ModelType, model_task: ModelTask) -> None:
    """Remove a cached model entry, if present.

    The cache key is the concatenation of name, type, and task. The delete
    is guarded by an optimistic lock and only issued when the key currently
    holds a value.
    """
    cache_key = f"{model_name}{model_type}{model_task}"
    async with OptimisticLock(self.cache, cache_key):
        cached = await self.cache.get(cache_key)
        if cached is not None:
            await self.cache.delete(cache_key)
11 changes: 0 additions & 11 deletions machine-learning/app/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,17 +109,6 @@ class InferenceEntry(TypedDict):
options: dict[str, Any]


class LoadModelEntry(InferenceEntry):
ttl: int

def __init__(self, name: str, task: ModelTask, type: ModelType, options: dict[str, Any], ttl: int):
super().__init__(name=name, task=task, type=type, options=options)

if ttl <= 0:
raise ValueError("ttl must be a positive integer")
self.ttl = ttl


InferenceEntries = tuple[list[InferenceEntry], list[InferenceEntry]]


Expand Down
10 changes: 2 additions & 8 deletions open-api/immich-openapi-specs.json
Original file line number Diff line number Diff line change
Expand Up @@ -5307,8 +5307,8 @@
"name": "password",
"required": false,
"in": "query",
"example": "password",
"schema": {
"example": "password",
"type": "string"
}
},
Expand Down Expand Up @@ -9510,16 +9510,10 @@
"properties": {
"enabled": {
"type": "boolean"
},
"ttl": {
"format": "int64",
"minimum": 0,
"type": "number"
}
},
"required": [
"enabled",
"ttl"
"enabled"
],
"type": "object"
},
Expand Down
2 changes: 0 additions & 2 deletions server/src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,6 @@ export interface SystemConfig {
modelName: string;
loadTextualModelOnConnection: {
enabled: boolean;
ttl: number;
};
};
duplicateDetection: {
Expand Down Expand Up @@ -276,7 +275,6 @@ export const defaults = Object.freeze<SystemConfig>({
modelName: 'ViT-B-32__openai',
loadTextualModelOnConnection: {
enabled: false,
ttl: 300,
},
},
duplicateDetection: {
Expand Down
9 changes: 3 additions & 6 deletions server/src/dtos/model-config.dto.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,9 @@ export class ModelConfig extends TaskConfig {
modelName!: string;
}

export class LoadTextualModelOnConnection extends TaskConfig {
@IsNumber()
@Min(0)
@Type(() => Number)
@ApiProperty({ type: 'number', format: 'int64' })
ttl!: number;
export class LoadTextualModelOnConnection {
@ValidateBoolean()
enabled!: boolean;
}

export class CLIPConfig extends ModelConfig {
Expand Down
13 changes: 7 additions & 6 deletions server/src/interfaces/machine-learning.interface.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,13 @@ export type ModelPayload = { imagePath: string } | { text: string };

type ModelOptions = { modelName: string };

export interface LoadModelOptions extends ModelOptions {
ttl: number;
}

export type FaceDetectionOptions = ModelOptions & { minScore: number };

type VisualResponse = { imageHeight: number; imageWidth: number };
export type ClipVisualRequest = { [ModelTask.SEARCH]: { [ModelType.VISUAL]: ModelOptions } };
export type ClipVisualResponse = { [ModelTask.SEARCH]: number[] } & VisualResponse;

export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions | LoadModelOptions } };
export type ClipTextualRequest = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: ModelOptions } };
export type ClipTextualResponse = { [ModelTask.SEARCH]: number[] };

export type FacialRecognitionRequest = {
Expand All @@ -50,6 +46,11 @@ export interface Face {
score: number;
}

// Actions the machine-learning service can be asked to perform on the CLIP
// textual model (mapped to the /load and /unload endpoints by the repository).
export enum LoadTextModelActions {
  // Preload the textual model (dispatched to the /load endpoint).
  LOAD,
  // Evict the textual model from the service cache (dispatched to /unload).
  UNLOAD,
}

export type FacialRecognitionResponse = { [ModelTask.FACIAL_RECOGNITION]: Face[] } & VisualResponse;
export type DetectedFaces = { faces: Face[] } & VisualResponse;
export type MachineLearningRequest = ClipVisualRequest | ClipTextualRequest | FacialRecognitionRequest;
Expand All @@ -58,5 +59,5 @@ export interface IMachineLearningRepository {
encodeImage(url: string, imagePath: string, config: ModelOptions): Promise<number[]>;
encodeText(url: string, text: string, config: ModelOptions): Promise<number[]>;
detectFaces(url: string, imagePath: string, config: FaceDetectionOptions): Promise<DetectedFaces>;
loadTextModel(url: string, config: ModelOptions): Promise<void>;
prepareTextModel(url: string, config: ModelOptions, action: LoadTextModelActions): Promise<void>;
}
24 changes: 22 additions & 2 deletions server/src/repositories/event.repository.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import {
ServerEventMap,
} from 'src/interfaces/event.interface';
import { ILoggerRepository } from 'src/interfaces/logger.interface';
import { IMachineLearningRepository } from 'src/interfaces/machine-learning.interface';
import { IMachineLearningRepository, LoadTextModelActions } from 'src/interfaces/machine-learning.interface';
import { ISystemMetadataRepository } from 'src/interfaces/system-metadata.interface';
import { AuthService } from 'src/services/auth.service';
import { Instrumentation } from 'src/utils/instrumentation';
Expand Down Expand Up @@ -79,7 +79,12 @@ export class EventRepository implements OnGatewayConnection, OnGatewayDisconnect
const { machineLearning } = await this.configCore.getConfig({ withCache: true });
if (machineLearning.clip.loadTextualModelOnConnection.enabled) {
try {
this.machineLearningRepository.loadTextModel(machineLearning.url, machineLearning.clip);
console.log(this.server);
this.machineLearningRepository.prepareTextModel(

Check failure on line 83 in server/src/repositories/event.repository.ts

View workflow job for this annotation

GitHub Actions / Test & Lint Server

Promises must be awaited, end with a call to .catch, end with a call to .then with a rejection handler or be explicitly marked as ignored with the `void` operator
machineLearning.url,
machineLearning.clip,
LoadTextModelActions.LOAD,
);
} catch (error) {
this.logger.warn(error);
}
Expand All @@ -100,6 +105,21 @@ export class EventRepository implements OnGatewayConnection, OnGatewayDisconnect
async handleDisconnect(client: Socket) {
this.logger.log(`Websocket Disconnect: ${client.id}`);
await client.leave(client.nsp.name);
if ('background' in client.handshake.query && client.handshake.query.background === 'false') {
const { machineLearning } = await this.configCore.getConfig({ withCache: true });
if (machineLearning.clip.loadTextualModelOnConnection.enabled && this.server?.engine.clientsCount == 0) {
try {
this.machineLearningRepository.prepareTextModel(

Check failure on line 112 in server/src/repositories/event.repository.ts

View workflow job for this annotation

GitHub Actions / Test & Lint Server

Promises must be awaited, end with a call to .catch, end with a call to .then with a rejection handler or be explicitly marked as ignored with the `void` operator
machineLearning.url,
machineLearning.clip,
LoadTextModelActions.UNLOAD,
);
this.logger.debug('sent request to unload text model');
} catch (error) {
this.logger.warn(error);
}
}
}
}

on<T extends EmitEvent>(event: T, handler: EmitHandler<T>): void {
Expand Down
12 changes: 9 additions & 3 deletions server/src/repositories/machine-learning.repository.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import {
FaceDetectionOptions,
FacialRecognitionResponse,
IMachineLearningRepository,
LoadTextModelActions,
MachineLearningRequest,
ModelPayload,
ModelTask,
Expand Down Expand Up @@ -38,11 +39,16 @@ export class MachineLearningRepository implements IMachineLearningRepository {
return res;
}

async loadTextModel(url: string, { modelName, loadTextualModelOnConnection: { ttl } }: CLIPConfig) {
private prepareTextModelUrl: Record<LoadTextModelActions, string> = {
[LoadTextModelActions.LOAD]: '/load',
[LoadTextModelActions.UNLOAD]: '/unload',
};

async prepareTextModel(url: string, { modelName }: CLIPConfig, actions: LoadTextModelActions) {
try {
const request = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: { modelName, ttl } } };
const request = { [ModelTask.SEARCH]: { [ModelType.TEXTUAL]: { modelName } } };
const formData = await this.getFormData(request);
const res = await this.fetchData(url, '/load', formData);
const res = await this.fetchData(url, this.prepareTextModelUrl[actions], formData);
if (res.status >= 400) {
throw new Error(`${errorPrefix} Loadings textual model failed with status ${res.status}: ${res.statusText}`);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,23 +88,6 @@
bind:checked={config.machineLearning.clip.loadTextualModelOnConnection.enabled}
disabled={disabled || !config.machineLearning.enabled || !config.machineLearning.clip.enabled}
/>

<hr />

<SettingInputField
inputType={SettingInputFieldType.NUMBER}
label={$t('admin.machine_learning_preload_model_ttl')}
bind:value={config.machineLearning.clip.loadTextualModelOnConnection.ttl}
step="1"
min={0}
desc={$t('admin.machine_learning_max_detection_distance_description')}
disabled={disabled ||
!config.machineLearning.enabled ||
!config.machineLearning.clip.enabled ||
!config.machineLearning.clip.loadTextualModelOnConnection.enabled}
isEdited={config.machineLearning.clip.loadTextualModelOnConnection.ttl !==
savedConfig.machineLearning.clip.loadTextualModelOnConnection.ttl}
/>
</div>
</SettingAccordion>
</div>
Expand Down

0 comments on commit 59300d2

Please sign in to comment.