mirror of
https://github.com/makeplane/plane
synced 2025-08-07 19:59:33 +00:00
Compare commits
3 Commits
refactor/e
...
feat-ask-A
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
33b125b5b9 | ||
|
|
d7a2a09191 | ||
|
|
e62704db55 |
@@ -1,3 +1,4 @@
|
||||
from .ai import urlpatterns as ai_urls
|
||||
from .analytic import urlpatterns as analytic_urls
|
||||
from .api import urlpatterns as api_urls
|
||||
from .asset import urlpatterns as asset_urls
|
||||
@@ -19,6 +20,7 @@ from .webhook import urlpatterns as webhook_urls
|
||||
from .workspace import urlpatterns as workspace_urls
|
||||
|
||||
urlpatterns = [
|
||||
*ai_urls,
|
||||
*analytic_urls,
|
||||
*asset_urls,
|
||||
*cycle_urls,
|
||||
|
||||
12
apiserver/plane/app/urls/ai.py
Normal file
12
apiserver/plane/app/urls/ai.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from django.urls import path


from plane.app.views import AskAIEndpoint


# Workspace-scoped URL for the ask-AI feature.  The endpoint answers a
# free-form user query grounded in caller-supplied context text.
urlpatterns = [
    path(
        "workspaces/<str:slug>/ask-AI/",
        AskAIEndpoint.as_view(),
        name="askAI",
    ),
]
|
||||
@@ -2,23 +2,11 @@ from django.urls import path
|
||||
|
||||
|
||||
from plane.app.views import UnsplashEndpoint
|
||||
from plane.app.views import GPTIntegrationEndpoint, WorkspaceGPTIntegrationEndpoint
|
||||
|
||||
|
||||
urlpatterns = [
|
||||
path(
|
||||
"unsplash/",
|
||||
UnsplashEndpoint.as_view(),
|
||||
name="unsplash",
|
||||
),
|
||||
path(
|
||||
"workspaces/<str:slug>/projects/<uuid:project_id>/ai-assistant/",
|
||||
GPTIntegrationEndpoint.as_view(),
|
||||
name="importer",
|
||||
),
|
||||
path(
|
||||
"workspaces/<str:slug>/ai-assistant/",
|
||||
WorkspaceGPTIntegrationEndpoint.as_view(),
|
||||
name="importer",
|
||||
),
|
||||
)
|
||||
]
|
||||
|
||||
@@ -191,12 +191,10 @@ from .page.version import PageVersionEndpoint
|
||||
from .search.base import GlobalSearchEndpoint
|
||||
from .search.issue import IssueSearchEndpoint
|
||||
|
||||
from .external.base import UnsplashEndpoint
|
||||
|
||||
from .ai.base import AskAIEndpoint
|
||||
|
||||
from .external.base import (
|
||||
GPTIntegrationEndpoint,
|
||||
UnsplashEndpoint,
|
||||
WorkspaceGPTIntegrationEndpoint,
|
||||
)
|
||||
from .estimate.base import (
|
||||
ProjectEstimatePointEndpoint,
|
||||
BulkEstimatePointEndpoint,
|
||||
|
||||
109
apiserver/plane/app/views/ai/base.py
Normal file
109
apiserver/plane/app/views/ai/base.py
Normal file
@@ -0,0 +1,109 @@
|
||||
# Python imports
|
||||
import os
|
||||
from enum import Enum
|
||||
|
||||
|
||||
# Third party imports
|
||||
from openai import OpenAI
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
# Django imports
|
||||
|
||||
# Module imports
|
||||
from ..base import BaseAPIView
|
||||
from plane.app.permissions import (
|
||||
WorkspaceEntityPermission,
|
||||
)
|
||||
from plane.license.utils.instance_value import get_configuration_value
|
||||
|
||||
|
||||
class Task(Enum):
    """Enumerates the AI tasks this module knows how to service."""

    # The only task currently supported: free-form Q&A over supplied context.
    ASK_AI = "ASK_AI"
|
||||
|
||||
|
||||
class AskAIEndpoint(BaseAPIView):
    """Workspace-level endpoint that answers a user query with an OpenAI
    chat completion, grounding the answer in caller-supplied context text.

    POST body:
        task (str): task identifier; defaults to ``"ASK_AI"``.
        text_input (str): the context the answer should be grounded in.
        query (str): the user's question.

    Responses:
        200 -> ``{"response": <model answer>}``
        400 -> missing inputs, missing OpenAI configuration, or unknown task
        500 -> any failure while calling the OpenAI API
    """

    permission_classes = [
        WorkspaceEntityPermission,
    ]

    def get_system_prompt(self, task):
        """Return ``(ok, payload)`` for *task*.

        ``ok`` is True and ``payload`` is the system prompt when *task* is a
        known task name; otherwise ``ok`` is False and ``payload`` is an
        error dict suitable for a 400 response body.
        """
        if task == Task.ASK_AI.value:
            return (
                True,
                """
                You are an advanced AI assistant designed to provide optimal responses by integrating given context with your broad knowledge base. Your primary objectives are:

                1. Thoroughly analyze and understand the provided context, which may include context, specific questions, code snippets, or any relevant information.
                2. Treat the given context as a critical input, using it to inform and guide your response.
                3. Leverage your extensive knowledge to complement and enhance your understanding of the context and to provide comprehensive, accurate answers.
                4. Seamlessly blend insights from the given context with your general knowledge, ensuring a cohesive and informative response.
                5. Adapt your response style and depth based on the nature of the context and the question asked.
                6. When dealing with code or technical context, provide explanations or solutions that are directly relevant and technically sound.
                7. Maintain clarity and conciseness in your responses while ensuring they are complete and informative.
                8. Use appropriate HTML tags for formatting only when it enhances readability or structure of the response.
                9. Respect privacy and avoid sensationalism when addressing sensitive topics.

                Your goal is to deliver the most relevant, accurate, and helpful response possible, considering both the provided content and your broader understanding.
                """,
            )
        return False, {
            "error": "Invalid task. Please provide a correct task name."
        }

    def post(self, request, slug):
        task = request.data.get("task", "ASK_AI")
        context = request.data.get("text_input", "")
        user_prompt = request.data.get("query", "")

        # Both the grounding context and the question are mandatory.
        if not context or not user_prompt:
            return Response(
                {"error": "Query and Text input are required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Instance-level configuration wins; environment variables are the
        # fallback defaults.
        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
            [
                {
                    "key": "OPENAI_API_KEY",
                    "default": os.environ.get("OPENAI_API_KEY", None),
                },
                {
                    "key": "GPT_ENGINE",
                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
                },
            ]
        )

        if not OPENAI_API_KEY or not GPT_ENGINE:
            return Response(
                {"error": "OpenAI API key and engine are required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Validate the task before constructing the client so an invalid
        # task does no unnecessary work.
        processed, system_prompt = self.get_system_prompt(task)
        if not processed:
            return Response(system_prompt, status=status.HTTP_400_BAD_REQUEST)

        client = OpenAI(api_key=OPENAI_API_KEY)

        try:
            # Low temperature keeps answers close to the supplied context.
            completion = client.chat.completions.create(
                model=GPT_ENGINE,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {
                        "role": "user",
                        "content": f"Context:\n\n{context}\n\nQuestion: {user_prompt}",
                    },
                ],
                temperature=0.1,
            )
            response = completion.choices[0].message.content.strip()
            return Response({"response": response}, status=status.HTTP_200_OK)
        except Exception as e:
            # NOTE(review): str(e) may expose internal details (e.g. API
            # error bodies) to the client — consider logging and returning a
            # generic message instead.
            return Response(
                {"error": f"An error occurred: {str(e)}"},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )
|
||||
129
apiserver/plane/app/views/external/base.py
vendored
129
apiserver/plane/app/views/external/base.py
vendored
@@ -2,8 +2,8 @@
|
||||
import requests
|
||||
import os
|
||||
|
||||
|
||||
# Third party imports
|
||||
from openai import OpenAI
|
||||
from rest_framework.response import Response
|
||||
from rest_framework import status
|
||||
|
||||
@@ -11,136 +11,9 @@ from rest_framework import status
|
||||
|
||||
# Module imports
|
||||
from ..base import BaseAPIView
|
||||
from plane.app.permissions import ProjectEntityPermission, WorkspaceEntityPermission
|
||||
from plane.db.models import Workspace, Project
|
||||
from plane.app.serializers import (
|
||||
ProjectLiteSerializer,
|
||||
WorkspaceLiteSerializer,
|
||||
)
|
||||
from plane.license.utils.instance_value import get_configuration_value
|
||||
|
||||
|
||||
class GPTIntegrationEndpoint(BaseAPIView):
    """Project-level AI-assistant endpoint: sends ``task`` (plus an optional
    ``prompt``) to an OpenAI chat completion and returns the text with an
    HTML-ified variant plus lite project/workspace details.

    Responses:
        200 -> ``{"response", "response_html", "project_detail", "workspace_detail"}``
        400 -> missing OpenAI configuration or missing ``task``
    """

    permission_classes = [
        ProjectEntityPermission,
    ]

    def post(self, request, slug, project_id):
        # Instance configuration first, environment variables as defaults.
        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
            [
                {
                    "key": "OPENAI_API_KEY",
                    "default": os.environ.get("OPENAI_API_KEY", None),
                },
                {
                    "key": "GPT_ENGINE",
                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
                },
            ]
        )

        # Check the keys
        if not OPENAI_API_KEY or not GPT_ENGINE:
            return Response(
                {"error": "OpenAI API key and engine is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # BUGFIX: prompt previously defaulted to False, which raised a
        # TypeError on string concatenation below whenever the request
        # omitted "prompt". An empty string keeps the concatenation valid.
        prompt = request.data.get("prompt", "")
        task = request.data.get("task", False)

        if not task:
            return Response(
                {"error": "Task is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        final_text = task + "\n" + prompt

        client = OpenAI(
            api_key=OPENAI_API_KEY,
        )

        response = client.chat.completions.create(
            model=GPT_ENGINE,
            messages=[{"role": "user", "content": final_text}],
        )

        workspace = Workspace.objects.get(slug=slug)
        project = Project.objects.get(pk=project_id)

        text = response.choices[0].message.content.strip()
        text_html = text.replace("\n", "<br/>")
        return Response(
            {
                "response": text,
                "response_html": text_html,
                "project_detail": ProjectLiteSerializer(project).data,
                "workspace_detail": WorkspaceLiteSerializer(workspace).data,
            },
            status=status.HTTP_200_OK,
        )
||||
|
||||
|
||||
class WorkspaceGPTIntegrationEndpoint(BaseAPIView):
    """Workspace-level AI-assistant endpoint: sends ``task`` (plus an
    optional ``prompt``) to an OpenAI chat completion and returns the text
    with an HTML-ified variant.

    Responses:
        200 -> ``{"response", "response_html"}``
        400 -> missing OpenAI configuration or missing ``task``
    """

    permission_classes = [
        WorkspaceEntityPermission,
    ]

    def post(self, request, slug):
        # Instance configuration first, environment variables as defaults.
        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
            [
                {
                    "key": "OPENAI_API_KEY",
                    "default": os.environ.get("OPENAI_API_KEY", None),
                },
                {
                    "key": "GPT_ENGINE",
                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
                },
            ]
        )

        # Check the keys
        if not OPENAI_API_KEY or not GPT_ENGINE:
            return Response(
                {"error": "OpenAI API key and engine is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        # BUGFIX: prompt previously defaulted to False, which raised a
        # TypeError on string concatenation below whenever the request
        # omitted "prompt". An empty string keeps the concatenation valid.
        prompt = request.data.get("prompt", "")
        task = request.data.get("task", False)

        if not task:
            return Response(
                {"error": "Task is required"},
                status=status.HTTP_400_BAD_REQUEST,
            )

        final_text = task + "\n" + prompt

        client = OpenAI(
            api_key=OPENAI_API_KEY,
        )

        response = client.chat.completions.create(
            model=GPT_ENGINE,
            messages=[{"role": "user", "content": final_text}],
        )

        text = response.choices[0].message.content.strip()
        text_html = text.replace("\n", "<br/>")
        return Response(
            {
                "response": text,
                "response_html": text_html,
            },
            status=status.HTTP_200_OK,
        )
|
||||
|
||||
|
||||
class UnsplashEndpoint(BaseAPIView):
|
||||
def get(self, request):
|
||||
(UNSPLASH_ACCESS_KEY,) = get_configuration_value(
|
||||
|
||||
Reference in New Issue
Block a user