Merge branch 'refs/heads/feat/knowledge-admin-role' into deploy/dev
* refs/heads/feat/knowledge-admin-role: (25 commits)
  feat: dataset operator permission
  feat: patch dataset when not partial_member_list refresh datasetp permission
  chore: remove port expose in docker compose (#5754)
  fix: langsmith message_trace end_user_data session_id error (#5759)
  fix: workflow trace none type error (#5758)
  fix: missing process data in parameter extractor (#5755)
  Docs/add docker dotenv notes (#5750)
  fix: slow sql of ops tracing (#5749)
  Update docker-compose.yaml (#5745)
  bump to 0.6.12-fix1 (#5743)
  fix: env SMTP_PORT is empty caused err when launching (#5742)
  fix: app config does not use empty string in the env (#5741)
  fix: couldn't log in or resetup after a failed setup (#5739)
  chore: support both $$ and $ latex format (#5723)
  Rename README to README.md (#5727)
  Chore/set entrypoint scripts permissions (#5726)
  add README for new docker/ directory (#5724)
  bump to 0.6.12 (#5712)
  Ignore new middleware.env docker file (#5715)
  fix: _convert_prompt_message_to_dict parameters err (#5716)
  ...

# Conflicts:
#	api/commands.py
ZhouhaoJiang committed Jun 30, 2024
2 parents ebe65d6 + 7c80d25 commit b574a1e
Showing 32 changed files with 504 additions and 252 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/api-tests.yml
@@ -60,6 +60,9 @@ jobs:
cp docker/.env.example docker/.env
cp docker/middleware.env.example docker/middleware.env
- name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh

- name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.0
with:
5 changes: 5 additions & 0 deletions .github/workflows/db-migration-test.yml
@@ -38,6 +38,11 @@ jobs:
- name: Install dependencies
run: poetry install -C api

- name: Prepare middleware env
run: |
cd docker
cp middleware.env.example middleware.env
- name: Set up Middlewares
uses: hoverkraft-tech/compose-action@v2.0.0
with:
10 changes: 10 additions & 0 deletions .github/workflows/expose_service_ports.sh
@@ -0,0 +1,10 @@
#!/bin/bash

yq eval '.services.weaviate.ports += ["8080:8080"]' -i docker/docker-compose.yaml
yq eval '.services.qdrant.ports += ["6333:6333"]' -i docker/docker-compose.yaml
yq eval '.services.chroma.ports += ["8000:8000"]' -i docker/docker-compose.yaml
yq eval '.services["milvus-standalone"].ports += ["19530:19530"]' -i docker/docker-compose.yaml
yq eval '.services.pgvector.ports += ["5433:5432"]' -i docker/docker-compose.yaml
yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compose.yaml

echo "Ports exposed for sandbox, weaviate, qdrant, chroma, milvus, pgvector, pgvecto-rs."
1 change: 1 addition & 0 deletions .gitignore
@@ -165,6 +165,7 @@ docker/volumes/milvus/*
docker/volumes/chroma/*

docker/nginx/conf.d/default.conf
docker/middleware.env

sdks/python-client/build
sdks/python-client/dist
35 changes: 21 additions & 14 deletions api/commands.py
@@ -593,17 +593,22 @@ def fix_app_site_missing():
"""
click.echo(click.style('Start fix app related site missing issue.', fg='green'))

failed_app_ids = []
while True:
try:
sql = """select apps.id as id from apps left join sites on sites.app_id=apps.id
sql = """select apps.id as id from apps left join sites on sites.app_id=apps.id
where sites.id is null limit 1000"""
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql))
with db.engine.begin() as conn:
rs = conn.execute(db.text(sql))

processed_count = 0
for i in rs:
processed_count += 1
app_id = str(i.id)

if app_id in failed_app_ids:
continue

processed_count = 0
for i in rs:
processed_count += 1
app_id = str(i.id)
try:
app = db.session.query(App).filter(App.id == app_id).first()
tenant = app.tenant
if tenant:
@@ -615,13 +620,15 @@ def fix_app_site_missing():
account = accounts[0]
print("Fix app {} related site missing issue.".format(app.id))
app_was_created.send(app, account=account)
except Exception as e:
failed_app_ids.append(app_id)
click.echo(click.style('Fix app {} related site missing issue failed!'.format(app_id), fg='red'))
logging.exception(f'Fix app related site missing issue failed, error: {e}')
continue

if not processed_count:
break

if not processed_count:
break
except Exception as e:
click.echo(click.style('Fix app related site missing issue failed!', fg='red'))
logging.exception(f'Fix app related site missing issue failed, error: {e}')
continue

click.echo(click.style('Congratulations! Fix app related site missing issue successful!', fg='green'))

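Because the hunk above interleaves removed and added lines, the shape of the new `fix_app_site_missing` loop is easier to see in isolation: IDs that fail are remembered and skipped on later passes, so one bad record no longer aborts the whole batch, and the loop ends once a pass returns no rows. The sketch below is a reading aid with hypothetical stand-ins (`fetch_broken_app_ids`, `fix_app`) for the SQL query and per-app repair in `api/commands.py`; it is not the file's final contents.

# Sketch of the retry/skip pattern introduced above; fetch_broken_app_ids()
# and fix_app() are hypothetical stand-ins, not functions from the repository.
import logging


def fetch_broken_app_ids(limit: int = 1000) -> list[str]:
    """Stand-in for the 'apps left join sites ... limit 1000' query."""
    return []


def fix_app(app_id: str) -> None:
    """Stand-in for looking up the tenant owner and re-sending app_was_created."""


def fix_app_site_missing_sketch() -> None:
    failed_app_ids: list[str] = []
    while True:
        processed_count = 0
        for app_id in fetch_broken_app_ids():
            processed_count += 1
            if app_id in failed_app_ids:
                # Failed once already; skip it so the rest of the batch proceeds.
                continue
            try:
                fix_app(app_id)
            except Exception:
                # Remember the bad record instead of aborting the whole command.
                failed_app_ids.append(app_id)
                logging.exception("Fix app %s related site missing issue failed", app_id)
        if not processed_count:
            break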
1 change: 0 additions & 1 deletion api/configs/app_config.py
@@ -36,7 +36,6 @@ class DifyConfig(
# read from dotenv format config file
env_file='.env',
env_file_encoding='utf-8',
env_ignore_empty=True,

# ignore extra attributes
extra='ignore',
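For context on the `env_ignore_empty` line in this hunk: in pydantic-settings it controls whether an environment variable that is set but empty (for example `SMTP_PORT=`) is treated as unset, in which case the field default applies, or is passed to validation as an empty string, which fails for non-string fields. A minimal, hypothetical demo of that behaviour (not code from this repository):

# Hypothetical demo of pydantic-settings' env_ignore_empty option.
import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class MailConfig(BaseSettings):
    model_config = SettingsConfigDict(env_ignore_empty=True)

    SMTP_PORT: int = Field(default=465)


os.environ["SMTP_PORT"] = ""       # set, but empty
print(MailConfig().SMTP_PORT)      # 465: the empty value is ignored, the default wins
# With env_ignore_empty off, the empty string reaches the int validator and
# raises a ValidationError instead (compare the SMTP_PORT launch error in #5742).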
2 changes: 1 addition & 1 deletion api/configs/packaging/__init__.py
@@ -8,7 +8,7 @@ class PackagingInfo(BaseModel):

CURRENT_VERSION: str = Field(
description='Dify version',
default='0.6.11',
default='0.6.12-fix1',
)

COMMIT_SHA: str = Field(
11 changes: 8 additions & 3 deletions api/controllers/console/datasets/datasets.py
@@ -115,7 +115,7 @@ def post(self):
args = parser.parse_args()

# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
if not current_user.is_dataset_editing:
if not current_user.is_editor:
raise Forbidden()

try:
Expand Down Expand Up @@ -213,8 +213,9 @@ def patch(self, dataset_id):
parser.add_argument('partial_member_list', type=list, location='json', help='Invalid parent user list.')
args = parser.parse_args()
data = request.get_json()

# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editing:
if not current_user.is_editor or current_user.is_dataset_operator:
raise Forbidden()

dataset = DatasetService.update_dataset(
@@ -229,6 +230,10 @@ def patch(self, dataset_id):
DatasetPermissionService.update_partial_member_list(dataset_id_str, data.get('partial_member_list'))
part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
result_data.update({'partial_member_list': part_users_list})
else:
partial_member_list = []
DatasetPermissionService.update_partial_member_list(dataset_id_str, partial_member_list)
result_data.update({'partial_member_list': partial_member_list})

return result_data, 200

Expand All @@ -239,7 +244,7 @@ def delete(self, dataset_id):
dataset_id_str = str(dataset_id)

# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
if not current_user.is_editor or current_user.is_dataset_operator:
raise Forbidden()

try:
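The permission checks in this file and in `datasets_document.py` below (`is_editor`, `is_dataset_editor`, `is_dataset_operator`) rely on role helpers defined on the account model. A hypothetical sketch of how such helpers are commonly shaped; the real properties live in the Dify account model and may differ in detail:

# Hypothetical sketch only; the actual role properties in api/models may differ.
from enum import Enum


class TenantRole(str, Enum):
    OWNER = "owner"
    ADMIN = "admin"
    EDITOR = "editor"
    NORMAL = "normal"
    DATASET_OPERATOR = "dataset_operator"


class CurrentUserSketch:
    def __init__(self, role: TenantRole):
        self.role = role

    @property
    def is_editor(self) -> bool:
        # App-level editing: owner, admin, or editor.
        return self.role in (TenantRole.OWNER, TenantRole.ADMIN, TenantRole.EDITOR)

    @property
    def is_dataset_operator(self) -> bool:
        return self.role == TenantRole.DATASET_OPERATOR

    @property
    def is_dataset_editor(self) -> bool:
        # Dataset-level editing additionally admits the new dataset_operator role.
        return self.is_editor or self.is_dataset_operator


# Under this sketch, a dataset_operator can create documents (is_dataset_editor
# is True) but, per the delete check above, cannot delete a dataset.
print(CurrentUserSketch(TenantRole.DATASET_OPERATOR).is_dataset_editor)  # True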
7 changes: 6 additions & 1 deletion api/controllers/console/datasets/datasets_document.py
@@ -228,7 +228,7 @@ def post(self, dataset_id):
raise NotFound('Dataset not found.')

# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
if not current_user.is_dataset_editor:
raise Forbidden()

try:
@@ -294,6 +294,11 @@ def post(self):
parser.add_argument('retrieval_model', type=dict, required=False, nullable=False,
location='json')
args = parser.parse_args()

# The role of the current user in the ta table must be admin, owner, or editor, or dataset_operator
if not current_user.is_dataset_editor:
raise Forbidden()

if args['indexing_technique'] == 'high_quality':
try:
model_manager = ModelManager()
22 changes: 5 additions & 17 deletions api/controllers/console/setup.py
@@ -3,11 +3,10 @@
from flask import current_app, request
from flask_restful import Resource, reqparse

from extensions.ext_database import db
from libs.helper import email, get_remote_ip, str_len
from libs.password import valid_password
from models.model import DifySetup
from services.account_service import AccountService, RegisterService, TenantService
from services.account_service import RegisterService, TenantService

from . import api
from .error import AlreadySetupError, NotInitValidateError, NotSetupError
@@ -51,28 +50,17 @@ def post(self):
required=True, location='json')
args = parser.parse_args()

# Register
account = RegisterService.register(
# setup
RegisterService.setup(
email=args['email'],
name=args['name'],
password=args['password']
password=args['password'],
ip_address=get_remote_ip(request)
)

TenantService.create_owner_tenant_if_not_exist(account)

setup()
AccountService.update_last_login(account, ip_address=get_remote_ip(request))

return {'result': 'success'}, 201


def setup():
dify_setup = DifySetup(
version=current_app.config['CURRENT_VERSION']
)
db.session.add(dify_setup)


def setup_required(view):
@wraps(view)
def decorated(*args, **kwargs):
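Judging from the removed lines, the controller used to register the account, create the owner workspace, record a `DifySetup` row, and update the last-login entry inline; the new single `RegisterService.setup(...)` call presumably bundles those steps (it now also receives the client IP for the last-login update). A rough sketch of what such a consolidated method might look like, pieced together from the removed code; the real implementation in `services/account_service.py` may differ:

# Assumption: a consolidated setup method inferred from the controller code
# removed above. Imports mirror the ones the controller no longer needs.
from flask import current_app

from extensions.ext_database import db
from models.model import DifySetup
from services.account_service import AccountService, RegisterService, TenantService


class RegisterServiceSketch:
    @classmethod
    def setup(cls, email: str, name: str, password: str, ip_address: str) -> None:
        # 1. create the admin account (formerly RegisterService.register(...))
        account = RegisterService.register(email=email, name=name, password=password)
        # 2. make sure that account owns a workspace
        TenantService.create_owner_tenant_if_not_exist(account)
        # 3. record that setup completed for the current version
        db.session.add(DifySetup(version=current_app.config['CURRENT_VERSION']))
        db.session.commit()  # commit timing is an assumption here
        # 4. keep the last-login bookkeeping that used to live in the controller
        AccountService.update_last_login(account, ip_address=ip_address)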
13 changes: 7 additions & 6 deletions api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -167,13 +167,14 @@ def _workflow_run_failed(
db.session.refresh(workflow_run)
db.session.close()

trace_manager.add_trace_task(
TraceTask(
TraceTaskName.WORKFLOW_TRACE,
workflow_run=workflow_run,
conversation_id=conversation_id,
if trace_manager:
trace_manager.add_trace_task(
TraceTask(
TraceTaskName.WORKFLOW_TRACE,
workflow_run=workflow_run,
conversation_id=conversation_id,
)
)
)

return workflow_run

2 changes: 1 addition & 1 deletion api/core/model_runtime/model_providers/moonshot/llm/llm.py
@@ -93,7 +93,7 @@ def _add_function_call(self, model: str, credentials: dict) -> None:
}.intersection(model_schema.features or []):
credentials['function_calling_type'] = 'tool_call'

def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict:
"""
Convert PromptMessage to dict for OpenAI API format
"""
2 changes: 1 addition & 1 deletion api/core/model_runtime/model_providers/nvidia/llm/llm.py
@@ -200,7 +200,7 @@ def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptM
endpoint_url = str(URL(endpoint_url) / 'chat' / 'completions')
elif 'server_url' in credentials:
endpoint_url = server_url
data['messages'] = [self._convert_prompt_message_to_dict(m) for m in prompt_messages]
data['messages'] = [self._convert_prompt_message_to_dict(m, credentials) for m in prompt_messages]
elif completion_type is LLMMode.COMPLETION:
data['prompt'] = 'ping'
if 'endpoint_url' in credentials:
Changed file (path not shown)
@@ -582,7 +582,7 @@ def _handle_generate_response(self, model: str, credentials: dict, response: req

return result

def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: dict = None) -> dict:
def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict:
"""
Convert PromptMessage to dict for OpenAI API format
"""
8 changes: 6 additions & 2 deletions api/core/ops/langfuse_trace/langfuse_trace.py
@@ -121,7 +121,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
inputs = json.loads(node_execution.process_data).get("prompts", {})
inputs = json.loads(node_execution.process_data).get(
"prompts", {}
) if node_execution.process_data else {}
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
outputs = (
@@ -213,7 +215,9 @@ def message_trace(
end_user_data: EndUser = db.session.query(EndUser).filter(
EndUser.id == message_data.from_end_user_id
).first()
user_id = end_user_data.session_id
if end_user_data is not None:
user_id = end_user_data.session_id
metadata["user_id"] = user_id

trace_data = LangfuseTrace(
id=message_id,
14 changes: 9 additions & 5 deletions api/core/ops/langsmith_trace/langsmith_trace.py
@@ -114,7 +114,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
inputs = json.loads(node_execution.process_data).get("prompts", {})
inputs = json.loads(node_execution.process_data).get(
"prompts", {}
) if node_execution.process_data else {}
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
outputs = (
@@ -181,13 +183,15 @@ def message_trace(self, trace_info: MessageTraceInfo):
message_id = message_data.id

user_id = message_data.from_account_id
metadata["user_id"] = user_id

if message_data.from_end_user_id:
end_user_data: EndUser = db.session.query(EndUser).filter(
EndUser.id == message_data.from_end_user_id
).first().session_id
end_user_id = end_user_data.session_id
metadata["end_user_id"] = end_user_id
metadata["user_id"] = user_id
).first()
if end_user_data is not None:
end_user_id = end_user_data.session_id
metadata["end_user_id"] = end_user_id

message_run = LangSmithRunModel(
input_tokens=trace_info.message_tokens,
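Both tracing integrations gain the same two guards in this merge: `process_data` can be NULL for non-LLM or failed node executions, and `from_end_user_id` may reference no `EndUser` row, in which case the old `.first().session_id` chain could raise an `AttributeError` (the #5759 fix listed above). A small, self-contained illustration of the pattern, with hypothetical helper names rather than repository code:

# Hypothetical helpers illustrating the null-guards added to the Langfuse
# and LangSmith trace builders above.
import json
from typing import Any, Optional


def load_prompts(process_data: Optional[str]) -> Any:
    """Decode a node's process_data and pull out 'prompts', tolerating NULL."""
    return json.loads(process_data).get("prompts", {}) if process_data else {}


def end_user_session_id(end_user: Optional[Any]) -> Optional[str]:
    """Return session_id only when the EndUser row was actually found."""
    return end_user.session_id if end_user is not None else None


print(load_prompts(None))            # {} instead of a TypeError
print(end_user_session_id(None))     # None instead of an AttributeError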
11 changes: 9 additions & 2 deletions api/core/ops/ops_trace_manager.py
@@ -352,10 +352,17 @@ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""

# get workflow_app_log_id
workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(workflow_run_id=workflow_run.id).first()
workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(
tenant_id=tenant_id,
app_id=workflow_run.app_id,
workflow_run_id=workflow_run.id
).first()
workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
# get message_id
message_data = db.session.query(Message.id).filter_by(workflow_run_id=workflow_run_id).first()
message_data = db.session.query(Message.id).filter_by(
conversation_id=conversation_id,
workflow_run_id=workflow_run_id
).first()
message_id = str(message_data.id) if message_data else None

metadata = {
Changed file (path not shown)
@@ -131,7 +131,7 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult:
return NodeRunResult(
status=WorkflowNodeExecutionStatus.FAILED,
inputs=inputs,
process_data={},
process_data=process_data,
outputs={
'__is_success': 0,
'__reason': str(e)
Changed file (path not shown)
@@ -0,0 +1,32 @@
"""add workflow_run_id index for message
Revision ID: b2602e131636
Revises: 63f9175e515b
Create Date: 2024-06-29 12:16:51.646346
"""
from alembic import op

import models as models

# revision identifiers, used by Alembic.
revision = 'b2602e131636'
down_revision = '63f9175e515b'
branch_labels = None
depends_on = None


def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('messages', schema=None) as batch_op:
batch_op.create_index('message_workflow_run_id_idx', ['conversation_id', 'workflow_run_id'], unique=False)

# ### end Alembic commands ###


def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('messages', schema=None) as batch_op:
batch_op.drop_index('message_workflow_run_id_idx')

# ### end Alembic commands ###
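This migration pairs with the `ops_trace_manager.py` change above: the message lookup now constrains both `conversation_id` and `workflow_run_id`, which is exactly the column pair the new `message_workflow_run_id_idx` covers, so the query can be answered from the index rather than by scanning `messages`. A self-contained, hypothetical SQLAlchemy sketch of that relationship (a simplified model, not the real `Message` class):

# Hypothetical, simplified model showing why the filter in
# ops_trace_manager.workflow_trace lines up with the composite index above.
from sqlalchemy import Column, Index, String, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Session


class Base(DeclarativeBase):
    pass


class MessageSketch(Base):
    __tablename__ = "messages"
    id = Column(String, primary_key=True)
    conversation_id = Column(String)
    workflow_run_id = Column(String)
    __table_args__ = (
        Index("message_workflow_run_id_idx", "conversation_id", "workflow_run_id"),
    )


engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    # Same filter shape as the trace manager: both indexed columns constrained.
    stmt = select(MessageSketch.id).filter_by(
        conversation_id="conv-1", workflow_run_id="run-1"
    )
    print(session.execute(stmt).first())  # None on an empty table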