Compare commits

1 commit

Author: Aaryan Khandelwal
Commit: 5f24c0f838 ("style: pages edit history components", 2024-04-16 15:09:48 +05:30)

6042 changed files with 192880 additions and 352349 deletions


@@ -2,7 +2,6 @@
*.pyc
.env
venv
.venv
node_modules/
**/node_modules/
npm-debug.log
@@ -15,4 +14,4 @@ build/
out/
**/out/
dist/
**/dist/
**/dist/


@@ -8,13 +8,6 @@ PGDATA="/var/lib/postgresql/data"
REDIS_HOST="plane-redis"
REDIS_PORT="6379"
# RabbitMQ Settings
RABBITMQ_HOST="plane-mq"
RABBITMQ_PORT="5672"
RABBITMQ_USER="plane"
RABBITMQ_PASSWORD="plane"
RABBITMQ_VHOST="plane"
# AWS Settings
AWS_REGION=""
AWS_ACCESS_KEY_ID="access-key"
@@ -38,9 +31,3 @@ USE_MINIO=1
# Nginx Configuration
NGINX_PORT=80
# Force HTTPS for handling SSL Termination
MINIO_ENDPOINT_SSL=0
# API key rate limit
API_KEY_RATE_LIMIT="60/minute"

.eslintrc.js (new file, 10 lines)

@@ -0,0 +1,10 @@
module.exports = {
root: true,
// This tells ESLint to load the config from the package `eslint-config-custom`
extends: ["custom"],
settings: {
next: {
rootDir: ["web/", "space/"],
},
},
};

.gitattributes (vendored, 1 line removed)

@@ -1 +0,0 @@
*.sh text eol=lf


@@ -2,7 +2,7 @@ name: Bug report
description: Create a bug report to help us improve Plane
title: "[bug]: "
labels: [🐛bug]
assignees: [vihar, pushya22]
assignees: [srinivaspendem, pushya22]
body:
- type: markdown
attributes:
@@ -55,19 +55,12 @@ body:
- Safari
- Other
- type: dropdown
id: variant
id: version
attributes:
label: Variant
label: Version
options:
- Cloud
- Self-hosted
- Local
validations:
required: true
- type: input
id: version
attributes:
label: Version
placeholder: v0.17.0-dev
validations:
required: true


@@ -2,7 +2,7 @@ name: Feature request
description: Suggest a feature to improve Plane
title: "[feature]: "
labels: [✨feature]
assignees: [vihar, pushya22]
assignees: [srinivaspendem, pushya22]
body:
- type: markdown
attributes:


@@ -1,20 +0,0 @@
### Description
<!-- Provide a detailed description of the changes in this PR -->
### Type of Change
<!-- Put an 'x' in the boxes that apply -->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] Feature (non-breaking change which adds functionality)
- [ ] Improvement (change that would cause existing functionality to not work as expected)
- [ ] Code refactoring
- [ ] Performance improvements
- [ ] Documentation update
### Screenshots and Media (if applicable)
<!-- Add screenshots to help explain your changes, ideally showcasing before and after -->
### Test Scenarios
<!-- Please describe the tests that you ran to verify your changes -->
### References
<!-- Link related issues if there are any -->

.github/workflows/auto-merge.yml (new file, 84 lines)

@@ -0,0 +1,84 @@
name: Auto Merge or Create PR on Push
on:
workflow_dispatch:
push:
branches:
- "sync/**"
env:
CURRENT_BRANCH: ${{ github.ref_name }}
SOURCE_BRANCH: ${{ secrets.SYNC_SOURCE_BRANCH_NAME }} # The sync branch such as "sync/ce"
TARGET_BRANCH: ${{ secrets.SYNC_TARGET_BRANCH_NAME }} # The target branch that you would like to merge changes like develop
GITHUB_TOKEN: ${{ secrets.ACCESS_TOKEN }} # Personal access token required to modify contents and workflows
REVIEWER: ${{ secrets.SYNC_PR_REVIEWER }}
jobs:
Check_Branch:
runs-on: ubuntu-latest
outputs:
BRANCH_MATCH: ${{ steps.check-branch.outputs.MATCH }}
steps:
- name: Check if current branch matches the secret
id: check-branch
run: |
if [ "$CURRENT_BRANCH" = "$SOURCE_BRANCH" ]; then
echo "MATCH=true" >> $GITHUB_OUTPUT
else
echo "MATCH=false" >> $GITHUB_OUTPUT
fi
Auto_Merge:
if: ${{ needs.Check_Branch.outputs.BRANCH_MATCH == 'true' }}
needs: [Check_Branch]
runs-on: ubuntu-latest
permissions:
pull-requests: write
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4.1.1
with:
fetch-depth: 0 # Fetch all history for all branches and tags
- name: Setup Git
run: |
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
- name: Setup GH CLI and Git Config
run: |
type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
sudo apt update
sudo apt install gh -y
- name: Check for merge conflicts
id: conflicts
run: |
git fetch origin $TARGET_BRANCH
git checkout $TARGET_BRANCH
# Attempt to merge the main branch into the current branch
if $(git merge --no-commit --no-ff $SOURCE_BRANCH); then
echo "No merge conflicts detected."
echo "HAS_CONFLICTS=false" >> $GITHUB_ENV
else
echo "Merge conflicts detected."
echo "HAS_CONFLICTS=true" >> $GITHUB_ENV
git merge --abort
fi
- name: Merge Change to Target Branch
if: env.HAS_CONFLICTS == 'false'
run: |
git commit -m "Merge branch '$SOURCE_BRANCH' into $TARGET_BRANCH"
git push origin $TARGET_BRANCH
- name: Create PR to Target Branch
if: env.HAS_CONFLICTS == 'true'
run: |
# Replace 'username' with the actual GitHub username of the reviewer.
PR_URL=$(gh pr create --base $TARGET_BRANCH --head $SOURCE_BRANCH --title "sync: merge conflicts need to be resolved" --body "" --reviewer $REVIEWER)
echo "Pull Request created: $PR_URL"


@@ -1,139 +0,0 @@
name: Build AIO Base Image
on:
workflow_dispatch:
inputs:
base_tag_name:
description: 'Base Tag Name'
required: false
default: ''
env:
TARGET_BRANCH: ${{ github.ref_name }}
jobs:
base_build_setup:
name: Build Preparation
runs-on: ubuntu-latest
outputs:
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
image_tag: ${{ steps.set_env_variables.outputs.IMAGE_TAG }}
steps:
- id: set_env_variables
name: Set Environment Variables
run: |
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
if [ "${{ github.event.inputs.base_tag_name }}" != "" ]; then
echo "IMAGE_TAG=${{ github.event.inputs.base_tag_name }}" >> $GITHUB_OUTPUT
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
echo "IMAGE_TAG=latest" >> $GITHUB_OUTPUT
elif [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then
echo "IMAGE_TAG=preview" >> $GITHUB_OUTPUT
else
echo "IMAGE_TAG=develop" >> $GITHUB_OUTPUT
fi
if [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT
else
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
fi
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
full_base_build_push:
runs-on: ubuntu-latest
needs: [base_build_setup]
env:
BASE_IMG_TAG: makeplane/plane-aio-base:full-${{ needs.base_build_setup.outputs.image_tag }}
BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.base_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.base_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.base_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v6.9.0
with:
context: ./aio
file: ./aio/Dockerfile-base-full
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.BASE_IMG_TAG }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
slim_base_build_push:
runs-on: ubuntu-latest
needs: [base_build_setup]
env:
BASE_IMG_TAG: makeplane/plane-aio-base:slim-${{ needs.base_build_setup.outputs.image_tag }}
BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.base_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.base_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.base_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v6.9.0
with:
context: ./aio
file: ./aio/Dockerfile-base-slim
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.BASE_IMG_TAG }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}


@@ -1,207 +0,0 @@
name: Branch Build AIO
on:
workflow_dispatch:
inputs:
full:
description: 'Run full build'
type: boolean
required: false
default: false
slim:
description: 'Run slim build'
type: boolean
required: false
default: false
base_tag_name:
description: 'Base Tag Name'
required: false
default: ''
release:
types: [released, prereleased]
env:
TARGET_BRANCH: ${{ github.ref_name || github.event.release.target_commitish }}
FULL_BUILD_INPUT: ${{ github.event.inputs.full }}
SLIM_BUILD_INPUT: ${{ github.event.inputs.slim }}
jobs:
branch_build_setup:
name: Build Setup
runs-on: ubuntu-latest
outputs:
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
flat_branch_name: ${{ steps.set_env_variables.outputs.FLAT_BRANCH_NAME }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
aio_base_tag: ${{ steps.set_env_variables.outputs.AIO_BASE_TAG }}
do_full_build: ${{ steps.set_env_variables.outputs.DO_FULL_BUILD }}
do_slim_build: ${{ steps.set_env_variables.outputs.DO_SLIM_BUILD }}
steps:
- id: set_env_variables
name: Set Environment Variables
run: |
if [ "${{ env.TARGET_BRANCH }}" == "master" ] || [ "${{ github.event_name }}" == "release" ]; then
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT
echo "AIO_BASE_TAG=latest" >> $GITHUB_OUTPUT
else
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
if [ "${{ github.event_name}}" == "workflow_dispatch" ] && [ "${{ github.event.inputs.base_tag_name }}" != "" ]; then
echo "AIO_BASE_TAG=${{ github.event.inputs.base_tag_name }}" >> $GITHUB_OUTPUT
elif [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then
echo "AIO_BASE_TAG=preview" >> $GITHUB_OUTPUT
else
echo "AIO_BASE_TAG=develop" >> $GITHUB_OUTPUT
fi
fi
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
if [ "${{ env.FULL_BUILD_INPUT }}" == "true" ] || [ "${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then
echo "DO_FULL_BUILD=true" >> $GITHUB_OUTPUT
else
echo "DO_FULL_BUILD=false" >> $GITHUB_OUTPUT
fi
if [ "${{ env.SLIM_BUILD_INPUT }}" == "true" ] || [ "${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then
echo "DO_SLIM_BUILD=true" >> $GITHUB_OUTPUT
else
echo "DO_SLIM_BUILD=false" >> $GITHUB_OUTPUT
fi
FLAT_BRANCH_NAME=$(echo "${{ env.TARGET_BRANCH }}" | sed 's/[^a-zA-Z0-9]/-/g')
echo "FLAT_BRANCH_NAME=$FLAT_BRANCH_NAME" >> $GITHUB_OUTPUT
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
full_build_push:
if: ${{ needs.branch_build_setup.outputs.do_full_build == 'true' }}
runs-on: ubuntu-22.04
needs: [branch_build_setup]
env:
BUILD_TYPE: full
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio:full-${{ needs.branch_build_setup.outputs.flat_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest
else
TAG=${{ env.AIO_IMAGE_TAGS }}
fi
echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v6.9.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args: |
BASE_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
slim_build_push:
if: ${{ needs.branch_build_setup.outputs.do_slim_build == 'true' }}
runs-on: ubuntu-22.04
needs: [branch_build_setup]
env:
BUILD_TYPE: slim
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio:slim-${{ needs.branch_build_setup.outputs.flat_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest
else
TAG=${{ env.AIO_IMAGE_TAGS }}
fi
echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v6.9.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args: |
BASE_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}


@@ -1,70 +1,37 @@
name: Branch Build CE
name: Branch Build
on:
workflow_dispatch:
inputs:
build_type:
description: "Type of build to run"
required: true
type: choice
default: "Build"
options:
- "Build"
- "Release"
releaseVersion:
description: "Release Version"
type: string
default: v0.0.0
isPrerelease:
description: "Is Pre-release"
type: boolean
default: false
required: true
arm64:
description: "Build for ARM64 architecture"
required: false
default: false
type: boolean
push:
branches:
- master
- preview
- canary
release:
types: [released, prereleased]
env:
TARGET_BRANCH: ${{ github.ref_name }}
ARM64_BUILD: ${{ github.event.inputs.arm64 }}
BUILD_TYPE: ${{ github.event.inputs.build_type }}
RELEASE_VERSION: ${{ github.event.inputs.releaseVersion }}
IS_PRERELEASE: ${{ github.event.inputs.isPrerelease }}
TARGET_BRANCH: ${{ github.ref_name || github.event.release.target_commitish }}
jobs:
branch_build_setup:
name: Build Setup
runs-on: ubuntu-22.04
name: Build-Push Web/Space/API/Proxy Docker Image
runs-on: ubuntu-latest
outputs:
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
dh_img_web: ${{ steps.set_env_variables.outputs.DH_IMG_WEB }}
dh_img_space: ${{ steps.set_env_variables.outputs.DH_IMG_SPACE }}
dh_img_admin: ${{ steps.set_env_variables.outputs.DH_IMG_ADMIN }}
dh_img_live: ${{ steps.set_env_variables.outputs.DH_IMG_LIVE }}
dh_img_backend: ${{ steps.set_env_variables.outputs.DH_IMG_BACKEND }}
dh_img_proxy: ${{ steps.set_env_variables.outputs.DH_IMG_PROXY }}
build_type: ${{steps.set_env_variables.outputs.BUILD_TYPE}}
build_release: ${{ steps.set_env_variables.outputs.BUILD_RELEASE }}
build_prerelease: ${{ steps.set_env_variables.outputs.BUILD_PRERELEASE }}
release_version: ${{ steps.set_env_variables.outputs.RELEASE_VERSION }}
build_frontend: ${{ steps.changed_files.outputs.frontend_any_changed }}
build_space: ${{ steps.changed_files.outputs.space_any_changed }}
build_backend: ${{ steps.changed_files.outputs.backend_any_changed }}
build_proxy: ${{ steps.changed_files.outputs.proxy_any_changed }}
steps:
- id: set_env_variables
name: Set Environment Variables
run: |
if [ "${{ env.ARM64_BUILD }}" == "true" ] || ([ "${{ env.BUILD_TYPE }}" == "Release" ] && [ "${{ env.IS_PRERELEASE }}" != "true" ]); then
if [ "${{ env.TARGET_BRANCH }}" == "master" ] || [ "${{ github.event_name }}" == "release" ]; then
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
@@ -75,221 +42,240 @@ jobs:
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
fi
BR_NAME=$( echo "${{ env.TARGET_BRANCH }}" |sed 's/[^a-zA-Z0-9.-]//g')
echo "TARGET_BRANCH=$BR_NAME" >> $GITHUB_OUTPUT
echo "DH_IMG_WEB=plane-frontend" >> $GITHUB_OUTPUT
echo "DH_IMG_SPACE=plane-space" >> $GITHUB_OUTPUT
echo "DH_IMG_ADMIN=plane-admin" >> $GITHUB_OUTPUT
echo "DH_IMG_LIVE=plane-live" >> $GITHUB_OUTPUT
echo "DH_IMG_BACKEND=plane-backend" >> $GITHUB_OUTPUT
echo "DH_IMG_PROXY=plane-proxy" >> $GITHUB_OUTPUT
echo "BUILD_TYPE=${{env.BUILD_TYPE}}" >> $GITHUB_OUTPUT
BUILD_RELEASE=false
BUILD_PRERELEASE=false
RELVERSION="latest"
if [ "${{ env.BUILD_TYPE }}" == "Release" ]; then
FLAT_RELEASE_VERSION=$(echo "${{ env.RELEASE_VERSION }}" | sed 's/[^a-zA-Z0-9.-]//g')
echo "FLAT_RELEASE_VERSION=${FLAT_RELEASE_VERSION}" >> $GITHUB_OUTPUT
semver_regex="^v([0-9]+)\.([0-9]+)\.([0-9]+)(-[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*)?$"
if [[ ! $FLAT_RELEASE_VERSION =~ $semver_regex ]]; then
echo "Invalid Release Version Format : $FLAT_RELEASE_VERSION"
echo "Please provide a valid SemVer version"
echo "e.g. v1.2.3 or v1.2.3-alpha-1"
echo "Exiting the build process"
exit 1 # Exit with status 1 to fail the step
fi
BUILD_RELEASE=true
RELVERSION=$FLAT_RELEASE_VERSION
if [ "${{ env.IS_PRERELEASE }}" == "true" ]; then
BUILD_PRERELEASE=true
fi
fi
echo "BUILD_RELEASE=${BUILD_RELEASE}" >> $GITHUB_OUTPUT
echo "BUILD_PRERELEASE=${BUILD_PRERELEASE}" >> $GITHUB_OUTPUT
echo "RELEASE_VERSION=${RELVERSION}" >> $GITHUB_OUTPUT
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
branch_build_push_admin:
name: Build-Push Admin Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
steps:
- name: Admin Build and Push
uses: makeplane/actions/build-push@v1.0.0
- name: Get changed files
id: changed_files
uses: tj-actions/changed-files@v42
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_admin }}
build-context: .
dockerfile-path: ./apps/admin/Dockerfile.admin
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
files_yaml: |
frontend:
- web/**
- packages/**
- 'package.json'
- 'yarn.lock'
- 'tsconfig.json'
- 'turbo.json'
space:
- space/**
- packages/**
- 'package.json'
- 'yarn.lock'
- 'tsconfig.json'
- 'turbo.json'
backend:
- apiserver/**
proxy:
- nginx/**
branch_build_push_web:
name: Build-Push Web Docker Image
runs-on: ubuntu-22.04
branch_build_push_frontend:
if: ${{ needs.branch_build_setup.outputs.build_frontend == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' || needs.branch_build_setup.outputs.gh_branch_name == 'master' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
steps:
- name: Web Build and Push
uses: makeplane/actions/build-push@v1.0.0
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_web }}
build-context: .
dockerfile-path: ./apps/web/Dockerfile.web
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
branch_build_push_space:
name: Build-Push Space Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
steps:
- name: Space Build and Push
uses: makeplane/actions/build-push@v1.0.0
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_space }}
build-context: .
dockerfile-path: ./apps/space/Dockerfile.space
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
branch_build_push_live:
name: Build-Push Live Collaboration Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
steps:
- name: Live Build and Push
uses: makeplane/actions/build-push@v1.0.0
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_live }}
build-context: .
dockerfile-path: ./apps/live/Dockerfile.live
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
branch_build_push_api:
name: Build-Push API Server Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
steps:
- name: Backend Build and Push
uses: makeplane/actions/build-push@v1.0.0
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_backend }}
build-context: ./apps/api
dockerfile-path: ./apps/api/Dockerfile.api
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
branch_build_push_proxy:
name: Build-Push Proxy Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
steps:
- name: Proxy Build and Push
uses: makeplane/actions/build-push@v1.0.0
with:
build-release: ${{ needs.branch_build_setup.outputs.build_release }}
build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }}
release-version: ${{ needs.branch_build_setup.outputs.release_version }}
dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }}
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_proxy }}
build-context: ./apps/proxy
dockerfile-path: ./apps/proxy/Dockerfile.ce
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
publish_release:
if: ${{ needs.branch_build_setup.outputs.build_type == 'Release' }}
name: Build Release
runs-on: ubuntu-22.04
needs:
[
branch_build_setup,
branch_build_push_admin,
branch_build_push_web,
branch_build_push_space,
branch_build_push_live,
branch_build_push_api,
branch_build_push_proxy,
]
env:
REL_VERSION: ${{ needs.branch_build_setup.outputs.release_version }}
FRONTEND_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/plane-frontend:${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Checkout
- name: Set Frontend Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-frontend:stable,${{ secrets.DOCKERHUB_USERNAME }}/plane-frontend:${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-frontend:latest
else
TAG=${{ env.FRONTEND_TAG }}
fi
echo "FRONTEND_TAG=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Update Assets
run: |
cp ./deploy/selfhost/install.sh deploy/selfhost/setup.sh
sed -i 's/${APP_RELEASE:-stable}/${APP_RELEASE:-'${REL_VERSION}'}/g' deploy/selfhost/docker-compose.yml
# sed -i 's/APP_RELEASE=stable/APP_RELEASE='${REL_VERSION}'/g' deploy/selfhost/variables.env
- name: Create Release
id: create_release
uses: softprops/action-gh-release@v2.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
- name: Build and Push Frontend to Docker Container Registry
uses: docker/build-push-action@v5.1.0
with:
tag_name: ${{ env.REL_VERSION }}
name: ${{ env.REL_VERSION }}
draft: false
prerelease: ${{ env.IS_PRERELEASE }}
generate_release_notes: true
files: |
${{ github.workspace }}/deploy/selfhost/setup.sh
${{ github.workspace }}/deploy/selfhost/swarm.sh
${{ github.workspace }}/deploy/selfhost/restore.sh
${{ github.workspace }}/deploy/selfhost/restore-airgapped.sh
${{ github.workspace }}/deploy/selfhost/docker-compose.yml
${{ github.workspace }}/deploy/selfhost/variables.env
context: .
file: ./web/Dockerfile.web
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.FRONTEND_TAG }}
push: true
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
branch_build_push_space:
if: ${{ needs.branch_build_setup.outputs.build_space == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' || needs.branch_build_setup.outputs.gh_branch_name == 'master' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
SPACE_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/plane-space:${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Space Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-space:stable,${{ secrets.DOCKERHUB_USERNAME }}/plane-space:${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-space:latest
else
TAG=${{ env.SPACE_TAG }}
fi
echo "SPACE_TAG=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push Space to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: .
file: ./space/Dockerfile.space
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.SPACE_TAG }}
push: true
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
branch_build_push_backend:
if: ${{ needs.branch_build_setup.outputs.build_backend == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' || needs.branch_build_setup.outputs.gh_branch_name == 'master' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
BACKEND_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/plane-backend:${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Backend Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-backend:stable,${{ secrets.DOCKERHUB_USERNAME }}/plane-backend:${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-backend:latest
else
TAG=${{ env.BACKEND_TAG }}
fi
echo "BACKEND_TAG=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push Backend to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: ./apiserver
file: ./apiserver/Dockerfile.api
platforms: ${{ env.BUILDX_PLATFORMS }}
push: true
tags: ${{ env.BACKEND_TAG }}
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
branch_build_push_proxy:
if: ${{ needs.branch_build_setup.outputs.build_proxy == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'release' || needs.branch_build_setup.outputs.gh_branch_name == 'master' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
PROXY_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/plane-proxy:${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Proxy Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-proxy:stable,${{ secrets.DOCKERHUB_USERNAME }}/plane-proxy:${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=${{ secrets.DOCKERHUB_USERNAME }}/plane-proxy:latest
else
TAG=${{ env.PROXY_TAG }}
fi
echo "PROXY_TAG=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push Plane-Proxy to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: ./nginx
file: ./nginx/Dockerfile
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.PROXY_TAG }}
push: true
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
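
The same three-way tag choice (release event tags stable plus the release tag, master tags latest, anything else keeps the branch tag) is repeated in each "Set ... Docker Tag" step above. A standalone sketch of that selection logic, with the image name and tags as placeholders rather than values taken from the workflow:

```bash
# Illustrative sketch of the tag-selection pattern used by the build jobs above.
# All arguments are placeholders supplied by the caller.
select_docker_tags() {
  local image="$1" event_name="$2" target_branch="$3" release_tag="$4" branch_tag="$5"
  if [ "$event_name" = "release" ]; then
    echo "${image}:stable,${image}:${release_tag}"
  elif [ "$target_branch" = "master" ]; then
    echo "${image}:latest"
  else
    echo "${branch_tag}"
  fi
}

# Example: a release event tags the frontend image as both stable and the release version.
select_docker_tags makeplane/plane-frontend release master v0.17.0 makeplane/plane-frontend:preview
# -> makeplane/plane-frontend:stable,makeplane/plane-frontend:v0.17.0
```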


@@ -3,93 +3,102 @@ name: Build and Lint on Pull Request
on:
workflow_dispatch:
pull_request:
types: ["opened", "synchronize", "ready_for_review"]
types: ["opened", "synchronize"]
jobs:
lint-server:
if: github.event.pull_request.draft == false
get-changed-files:
runs-on: ubuntu-latest
outputs:
apiserver_changed: ${{ steps.changed-files.outputs.apiserver_any_changed }}
web_changed: ${{ steps.changed-files.outputs.web_any_changed }}
space_changed: ${{ steps.changed-files.outputs.deploy_any_changed }}
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
- uses: actions/checkout@v3
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v41
with:
python-version: "3.x" # Specify the Python version you need
files_yaml: |
apiserver:
- apiserver/**
web:
- web/**
- packages/**
- 'package.json'
- 'yarn.lock'
- 'tsconfig.json'
- 'turbo.json'
deploy:
- space/**
- packages/**
- 'package.json'
- 'yarn.lock'
- 'tsconfig.json'
- 'turbo.json'
lint-apiserver:
needs: get-changed-files
runs-on: ubuntu-latest
if: needs.get-changed-files.outputs.apiserver_changed == 'true'
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.x' # Specify the Python version you need
- name: Install Pylint
run: python -m pip install ruff
- name: Install Server Dependencies
run: cd apps/server && pip install -r requirements.txt
- name: Lint apps/server
run: ruff check --fix apps/server
lint-admin:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20.x
- run: yarn install
- run: yarn lint --filter=admin
lint-space:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20.x
- run: yarn install
- run: yarn lint --filter=space
- name: Install Apiserver Dependencies
run: cd apiserver && pip install -r requirements.txt
- name: Lint apiserver
run: ruff check --fix apiserver
lint-web:
if: github.event.pull_request.draft == false
needs: get-changed-files
if: needs.get-changed-files.outputs.web_changed == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: 20.x
node-version: 18.x
- run: yarn install
- run: yarn lint --filter=web
build-admin:
needs: lint-admin
lint-space:
needs: get-changed-files
if: needs.get-changed-files.outputs.space_changed == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: 20.x
node-version: 18.x
- run: yarn install
- run: yarn build --filter=admin
build-space:
needs: lint-space
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20.x
- run: yarn install
- run: yarn build --filter=space
- run: yarn lint --filter=space
build-web:
needs: lint-web
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v2
with:
node-version: 20.x
node-version: 18.x
- run: yarn install
- run: yarn build --filter=web
build-space:
needs: lint-space
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v2
with:
node-version: 18.x
- run: yarn install
- run: yarn build --filter=space


@@ -29,11 +29,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -46,7 +46,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -59,6 +59,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"

.github/workflows/create-sync-pr.yml (new file, 55 lines)

@@ -0,0 +1,55 @@
name: Create Sync Action
on:
workflow_dispatch:
push:
branches:
- preview
env:
SOURCE_BRANCH_NAME: ${{ github.ref_name }}
jobs:
sync_changes:
runs-on: ubuntu-latest
permissions:
pull-requests: write
contents: read
steps:
- name: Checkout Code
uses: actions/checkout@v4.1.1
with:
persist-credentials: false
fetch-depth: 0
- name: Setup GH CLI
run: |
type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
sudo apt update
sudo apt install gh -y
- name: Push Changes to Target Repo A
env:
GH_TOKEN: ${{ secrets.ACCESS_TOKEN }}
run: |
TARGET_REPO="${{ secrets.TARGET_REPO_A }}"
TARGET_BRANCH="${{ secrets.TARGET_REPO_A_BRANCH_NAME }}"
SOURCE_BRANCH="${{ env.SOURCE_BRANCH_NAME }}"
git checkout $SOURCE_BRANCH
git remote add target-origin-a "https://$GH_TOKEN@github.com/$TARGET_REPO.git"
git push target-origin-a $SOURCE_BRANCH:$TARGET_BRANCH
- name: Push Changes to Target Repo B
env:
GH_TOKEN: ${{ secrets.ACCESS_TOKEN }}
run: |
TARGET_REPO="${{ secrets.TARGET_REPO_B }}"
TARGET_BRANCH="${{ secrets.TARGET_REPO_B_BRANCH_NAME }}"
SOURCE_BRANCH="${{ env.SOURCE_BRANCH_NAME }}"
git remote add target-origin-b "https://$GH_TOKEN@github.com/$TARGET_REPO.git"
git push target-origin-b $SOURCE_BRANCH:$TARGET_BRANCH


@@ -3,108 +3,130 @@ name: Feature Preview
on:
workflow_dispatch:
inputs:
base_tag_name:
description: 'Base Tag Name'
web-build:
required: false
default: 'preview'
description: 'Build Web'
type: boolean
default: true
space-build:
required: false
description: 'Build Space'
type: boolean
default: false
env:
TARGET_BRANCH: ${{ github.ref_name }}
BUILD_WEB: ${{ github.event.inputs.web-build }}
BUILD_SPACE: ${{ github.event.inputs.space-build }}
jobs:
branch_build_setup:
name: Build Setup
setup-feature-build:
name: Feature Build Setup
runs-on: ubuntu-latest
outputs:
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
flat_branch_name: ${{ steps.set_env_variables.outputs.FLAT_BRANCH_NAME }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
aio_base_tag: ${{ steps.set_env_variables.outputs.AIO_BASE_TAG }}
do_full_build: ${{ steps.set_env_variables.outputs.DO_FULL_BUILD }}
do_slim_build: ${{ steps.set_env_variables.outputs.DO_SLIM_BUILD }}
steps:
- id: set_env_variables
name: Set Environment Variables
- name: Checkout
run: |
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
if [ "${{ github.event.inputs.base_tag_name }}" != "" ]; then
echo "AIO_BASE_TAG=${{ github.event.inputs.base_tag_name }}" >> $GITHUB_OUTPUT
else
echo "AIO_BASE_TAG=develop" >> $GITHUB_OUTPUT
fi
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
FLAT_BRANCH_NAME=$(echo "${{ env.TARGET_BRANCH }}" | sed 's/[^a-zA-Z0-9]/-/g')
echo "FLAT_BRANCH_NAME=$FLAT_BRANCH_NAME" >> $GITHUB_OUTPUT
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
full_build_push:
runs-on: ubuntu-22.04
needs: [branch_build_setup]
env:
BUILD_TYPE: full
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio-feature:${{ needs.branch_build_setup.outputs.flat_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v6.9.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args:
BUILD_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
# cache-from: type=gha
# cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
echo "BUILD_WEB=$BUILD_WEB"
echo "BUILD_SPACE=$BUILD_SPACE"
outputs:
AIO_IMAGE_TAGS: ${{ env.AIO_IMAGE_TAGS }}
web-build: ${{ env.BUILD_WEB}}
space-build: ${{env.BUILD_SPACE}}
feature-build-web:
if: ${{ needs.setup-feature-build.outputs.web-build == 'true' }}
needs: setup-feature-build
name: Feature Build Web
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
NEXT_PUBLIC_API_BASE_URL: ${{ vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL }}
steps:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install AWS cli
run: |
sudo apt-get update
sudo apt-get install -y python3-pip
pip3 install awscli
- name: Checkout
uses: actions/checkout@v4
with:
path: plane
- name: Install Dependencies
run: |
cd $GITHUB_WORKSPACE/plane
yarn install
- name: Build Web
id: build-web
run: |
cd $GITHUB_WORKSPACE/plane
yarn build --filter=web
cd $GITHUB_WORKSPACE
TAR_NAME="web.tar.gz"
tar -czf $TAR_NAME ./plane
FILE_EXPIRY=$(date -u -d "+2 days" +"%Y-%m-%dT%H:%M:%SZ")
aws s3 cp $TAR_NAME s3://${{ env.AWS_BUCKET }}/${{github.sha}}/$TAR_NAME --expires $FILE_EXPIRY
feature-build-space:
if: ${{ needs.setup-feature-build.outputs.space-build == 'true' }}
needs: setup-feature-build
name: Feature Build Space
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
NEXT_PUBLIC_DEPLOY_WITH_NGINX: 1
NEXT_PUBLIC_API_BASE_URL: ${{ vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL }}
outputs:
do-build: ${{ needs.setup-feature-build.outputs.space-build }}
s3-url: ${{ steps.build-space.outputs.S3_PRESIGNED_URL }}
steps:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install AWS cli
run: |
sudo apt-get update
sudo apt-get install -y python3-pip
pip3 install awscli
- name: Checkout
uses: actions/checkout@v4
with:
path: plane
- name: Install Dependencies
run: |
cd $GITHUB_WORKSPACE/plane
yarn install
- name: Build Space
id: build-space
run: |
cd $GITHUB_WORKSPACE/plane
yarn build --filter=space
cd $GITHUB_WORKSPACE
TAR_NAME="space.tar.gz"
tar -czf $TAR_NAME ./plane
FILE_EXPIRY=$(date -u -d "+2 days" +"%Y-%m-%dT%H:%M:%SZ")
aws s3 cp $TAR_NAME s3://${{ env.AWS_BUCKET }}/${{github.sha}}/$TAR_NAME --expires $FILE_EXPIRY
feature-deploy:
needs: [branch_build_setup, full_build_push]
if: ${{ always() && (needs.setup-feature-build.outputs.web-build == 'true' || needs.setup-feature-build.outputs.space-build == 'true') }}
needs: [feature-build-web, feature-build-space]
name: Feature Deploy
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
KUBE_CONFIG_FILE: ${{ secrets.FEATURE_PREVIEW_KUBE_CONFIG }}
DEPLOYMENT_NAME: ${{ needs.branch_build_setup.outputs.flat_branch_name }}
steps:
- name: Install AWS cli
run: |
@@ -132,37 +154,46 @@ jobs:
./get_helm.sh
- name: App Deploy
run: |
helm --kube-insecure-skip-tls-verify repo add feature-preview ${{ vars.FEATURE_PREVIEW_HELM_CHART_URL }}
WEB_S3_URL=""
if [ ${{ env.BUILD_WEB }} == true ]; then
WEB_S3_URL=$(aws s3 presign s3://${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}/${{github.sha}}/web.tar.gz --expires-in 3600)
fi
APP_NAMESPACE="${{ vars.FEATURE_PREVIEW_NAMESPACE }}"
SPACE_S3_URL=""
if [ ${{ env.BUILD_SPACE }} == true ]; then
SPACE_S3_URL=$(aws s3 presign s3://${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}/${{github.sha}}/space.tar.gz --expires-in 3600)
fi
helm --kube-insecure-skip-tls-verify uninstall \
${{ env.DEPLOYMENT_NAME }} \
--namespace $APP_NAMESPACE \
--timeout 10m0s \
--wait \
--ignore-not-found
if [ ${{ env.BUILD_WEB }} == true ] || [ ${{ env.BUILD_SPACE }} == true ]; then
METADATA=$(helm --kube-insecure-skip-tls-verify upgrade \
--install=true \
--namespace $APP_NAMESPACE \
--set dockerhub.loginid=${{ secrets.DOCKERHUB_USERNAME }} \
--set dockerhub.password=${{ secrets.DOCKERHUB_TOKEN_RO}} \
--set config.feature_branch=${{ env.DEPLOYMENT_NAME }} \
--set ingress.primaryDomain=${{vars.FEATURE_PREVIEW_PRIMARY_DOMAIN || 'feature.plane.tools' }} \
--set ingress.tls_secret=${{vars.FEATURE_PREVIEW_INGRESS_TLS_SECRET || '' }} \
--output json \
--timeout 10m0s \
--wait \
${{ env.DEPLOYMENT_NAME }} feature-preview/${{ vars.FEATURE_PREVIEW_HELM_CHART_NAME }} )
helm --kube-insecure-skip-tls-verify repo add feature-preview ${{ vars.FEATURE_PREVIEW_HELM_CHART_URL }}
APP_NAME=$(echo $METADATA | jq -r '.name')
APP_NAMESPACE="${{ vars.FEATURE_PREVIEW_NAMESPACE }}"
DEPLOY_SCRIPT_URL="${{ vars.FEATURE_PREVIEW_DEPLOY_SCRIPT_URL }}"
INGRESS_HOSTNAME=$(kubectl get ingress -n $APP_NAMESPACE --insecure-skip-tls-verify \
-o jsonpath='{.items[?(@.metadata.annotations.meta\.helm\.sh\/release-name=="'$APP_NAME'")]}' | \
jq -r '.spec.rules[0].host')
METADATA=$(helm --kube-insecure-skip-tls-verify install feature-preview/${{ vars.FEATURE_PREVIEW_HELM_CHART_NAME }} \
--generate-name \
--namespace $APP_NAMESPACE \
--set ingress.primaryDomain=${{vars.FEATURE_PREVIEW_PRIMARY_DOMAIN || 'feature.plane.tools' }} \
--set web.image=${{vars.FEATURE_PREVIEW_DOCKER_BASE}} \
--set web.enabled=${{ env.BUILD_WEB || false }} \
--set web.artifact_url=$WEB_S3_URL \
--set space.image=${{vars.FEATURE_PREVIEW_DOCKER_BASE}} \
--set space.enabled=${{ env.BUILD_SPACE || false }} \
--set space.artifact_url=$SPACE_S3_URL \
--set shared_config.deploy_script_url=$DEPLOY_SCRIPT_URL \
--set shared_config.api_base_url=${{vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL}} \
--output json \
--timeout 1000s)
echo "****************************************"
echo "APP NAME ::: $APP_NAME"
echo "INGRESS HOSTNAME ::: $INGRESS_HOSTNAME"
echo "****************************************"
APP_NAME=$(echo $METADATA | jq -r '.name')
INGRESS_HOSTNAME=$(kubectl get ingress -n feature-builds --insecure-skip-tls-verify \
-o jsonpath='{.items[?(@.metadata.annotations.meta\.helm\.sh\/release-name=="'$APP_NAME'")]}' | \
jq -r '.spec.rules[0].host')
echo "****************************************"
echo "APP NAME ::: $APP_NAME"
echo "INGRESS HOSTNAME ::: $INGRESS_HOSTNAME"
echo "****************************************"
fi


@@ -1,52 +0,0 @@
name: Create PR on Sync
on:
workflow_dispatch:
push:
branches:
- "sync/**"
env:
CURRENT_BRANCH: ${{ github.ref_name }}
TARGET_BRANCH: "preview" # The target branch that you would like to merge changes like develop
GITHUB_TOKEN: ${{ secrets.ACCESS_TOKEN }} # Personal access token required to modify contents and workflows
ACCOUNT_USER_NAME: ${{ vars.ACCOUNT_USER_NAME }}
ACCOUNT_USER_EMAIL: ${{ vars.ACCOUNT_USER_EMAIL }}
jobs:
create_pull_request:
runs-on: ubuntu-latest
permissions:
pull-requests: write
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for all branches and tags
- name: Setup Git
run: |
git config user.name "$ACCOUNT_USER_NAME"
git config user.email "$ACCOUNT_USER_EMAIL"
- name: Setup GH CLI and Git Config
run: |
type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
sudo apt update
sudo apt install gh -y
- name: Create PR to Target Branch
run: |
# get all pull requests and check if there is already a PR
PR_EXISTS=$(gh pr list --base $TARGET_BRANCH --head $CURRENT_BRANCH --state open --json number | jq '.[] | .number')
if [ -n "$PR_EXISTS" ]; then
echo "Pull Request already exists: $PR_EXISTS"
else
echo "Creating new pull request"
PR_URL=$(gh pr create --base $TARGET_BRANCH --head $CURRENT_BRANCH --title "${{ vars.SYNC_PR_TITLE }}" --body "")
echo "Pull Request created: $PR_URL"
fi


@@ -1,44 +0,0 @@
name: Sync Repositories
on:
workflow_dispatch:
push:
branches:
- preview
env:
SOURCE_BRANCH_NAME: ${{ github.ref_name }}
jobs:
sync_changes:
runs-on: ubuntu-22.04
permissions:
pull-requests: write
contents: read
steps:
- name: Checkout Code
uses: actions/checkout@v4
with:
persist-credentials: false
fetch-depth: 0
- name: Setup GH CLI
run: |
type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)
curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
sudo apt update
sudo apt install gh -y
- name: Push Changes to Target Repo
env:
GH_TOKEN: ${{ secrets.ACCESS_TOKEN }}
run: |
TARGET_REPO="${{ vars.SYNC_TARGET_REPO }}"
TARGET_BRANCH="${{ vars.SYNC_TARGET_BRANCH_NAME }}"
SOURCE_BRANCH="${{ env.SOURCE_BRANCH_NAME }}"
git checkout $SOURCE_BRANCH
git remote add target-origin-a "https://$GH_TOKEN@github.com/$TARGET_REPO.git"
git push target-origin-a $SOURCE_BRANCH:$TARGET_BRANCH

.gitignore (vendored, 14 lines changed)

@@ -1,6 +1,5 @@
node_modules
.next
.yarn
### NextJS ###
# Dependencies
@@ -53,8 +52,6 @@ mediafiles
.env
.DS_Store
logs/
htmlcov/
.coverage
node_modules/
assets/dist/
@@ -81,17 +78,6 @@ pnpm-workspace.yaml
.npmrc
.secrets
tmp/
## packages
dist
.temp/
deploy/selfhost/plane-app/
## Storybook
*storybook.log
output.css
dev-editor
# Redis
*.rdb
*.rdb.gz


@@ -1,16 +0,0 @@
{ pkgs, ... }: {
# Which nixpkgs channel to use.
channel = "stable-23.11"; # or "unstable"
# Use https://search.nixos.org/packages to find packages
packages = [
pkgs.nodejs_20
pkgs.python3
];
services.docker.enable = true;
services.postgres.enable = true;
services.redis.enable = true;
}


@@ -1 +0,0 @@
nodeLinker: node-modules


@@ -4,7 +4,7 @@ Thank you for showing an interest in contributing to Plane! All kinds of contrib
## Submitting an issue
Before submitting a new issue, please search the [issues](https://github.com/makeplane/plane/issues) tab. Maybe an issue or discussion already exists and might inform you of workarounds. Otherwise, you can give new information.
Before submitting a new issue, please search the [issues](https://github.com/makeplane/plane/issues) tab. Maybe an issue or discussion already exists and might inform you of workarounds. Otherwise, you can give new informplaneation.
While we want to fix all the [issues](https://github.com/makeplane/plane/issues), we first need to be able to reproduce and confirm a bug before fixing it. Please provide us with a minimal reproduction scenario using a repository or [Gist](https://gist.github.com/). Having a live, reproducible scenario gives us the information we need without going back and forth with additional questions like:
@@ -15,40 +15,20 @@ Without said minimal reproduction, we won't be able to investigate all [issues](
You can open a new issue with this [issue form](https://github.com/makeplane/plane/issues/new).
### Naming conventions for issues
When opening a new issue, please use a clear and concise title that follows this format:
- For bugs: `🐛 Bug: [short description]`
- For features: `🚀 Feature: [short description]`
- For improvements: `🛠️ Improvement: [short description]`
- For documentation: `📘 Docs: [short description]`
**Examples:**
- `🐛 Bug: API token expiry time not saving correctly`
- `📘 Docs: Clarify RAM requirement for local setup`
- `🚀 Feature: Allow custom time selection for token expiration`
This helps us triage and manage issues more efficiently.
## Projects setup and Architecture
### Requirements
- Docker Engine installed and running
- Node.js version 20+ [LTS version](https://nodejs.org/en/about/previous-releases)
- Node.js version v16.18.0
- Python version 3.8+
- Postgres version v14
- Redis version v6.2.7
- **Memory**: Minimum **12 GB RAM** recommended
> ⚠️ Running the project on a system with only 8 GB RAM may lead to setup failures or memory crashes (especially during Docker container build/start or dependency install). Use cloud environments like GitHub Codespaces or upgrade local RAM if possible.
### Setup the project
The project is a monorepo, with the backend API and the frontend apps in a single repository.
The backend is a django project which is kept inside apps/api
The backend is a django project which is kept inside apiserver
1. Clone the repo
@@ -70,17 +50,6 @@ chmod +x setup.sh
docker compose -f docker-compose-local.yml up
```
4. Start web apps:
```bash
yarn dev
```
5. Open your browser to http://localhost:3001/god-mode/ and register yourself as instance admin
6. Open up your browser to http://localhost:3000 then log in using the same credentials from the previous step
That's it! You're all set to begin coding. Remember to refresh your browser if changes don't auto-reload. Happy contributing! 🎉
## Missing a Feature?
If a feature is missing, you can directly _request_ a new one [here](https://github.com/makeplane/plane/issues/new?assignees=&labels=feature&template=feature_request.yml&title=%F0%9F%9A%80+Feature%3A+). You also can do the same by choosing "🚀 Feature" when raising a [New Issue](https://github.com/makeplane/plane/issues/new/choose) on our GitHub Repository.
@@ -93,155 +62,17 @@ To ensure consistency throughout the source code, please keep these rules in min
- All features or bug fixes must be tested by one or more specs (unit-tests).
- We use [Eslint default rule guide](https://eslint.org/docs/rules/), with minor changes. An automated formatter is available using prettier.
## Need help? Questions and suggestions
Questions, suggestions, and thoughts are most welcome. We can also be reached in our [Discord Server](https://discord.com/invite/A92xrEGCge).
## Ways to contribute
- Try Plane Cloud and the self-hosted platform and give feedback
- Add new integrations
- Add or update translations
- Help with open [issues](https://github.com/makeplane/plane/issues) or [create your own](https://github.com/makeplane/plane/issues/new/choose)
- Share your thoughts and suggestions with us
- Help create tutorials and blog posts
- Request a feature by submitting a proposal
- Report a bug
- **Improve documentation** - fix incomplete or missing [docs](https://docs.plane.so/), bad wording, examples or explanations.
## Contributing to language support
This guide is designed to help contributors understand how to add or update translations in the application.
### Understanding translation structure
#### File organization
Translations are organized by language in the locales directory. Each language has its own folder containing JSON files for translations. Here's how it looks:
```
packages/i18n/src/locales/
├── en/
│ ├── core.json # Critical translations
│ └── translations.json
├── fr/
│ └── translations.json
└── [language]/
└── translations.json
```
#### Nested structure
To keep translations organized, we use a nested structure for keys. This makes it easier to manage and locate specific translations. For example:
```json
{
"issue": {
"label": "Work item",
"title": {
"label": "Work item title"
}
}
}
```
### Translation formatting guide
We use [IntlMessageFormat](https://formatjs.github.io/docs/intl-messageformat/) to handle dynamic content, such as variables and pluralization. Here's how to format your translations:
#### Examples
- **Simple variables**
```json
{
"greeting": "Hello, {name}!"
}
```
- **Pluralization**
```json
{
"items": "{count, plural, one {Work item} other {Work items}}"
}
```
### Contributing guidelines
#### Updating existing translations
1. Locate the key in `locales/<language>/translations.json`.
2. Update the value while ensuring the key structure remains intact.
3. Preserve any existing ICU formats (e.g., variables, pluralization).
#### Adding new translation keys
1. When introducing a new key, ensure it is added to **all** language files, even if translations are not immediately available. Use English as a placeholder if needed.
2. Keep the nesting structure consistent across all languages.
3. If the new key requires dynamic content (e.g., variables or pluralization), ensure the ICU format is applied uniformly across all languages.
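For example, here is a minimal sketch of steps 1 and 3 for a hypothetical key (`project.count` is illustrative only and does not refer to an existing key). The same nested entry, with the same ICU plural format, would be added to `locales/en/translations.json`, `locales/fr/translations.json`, and every other language file, keeping the English text as a placeholder wherever a translation is not yet available:
```json
{
  "project": {
    "count": "{count, plural, one {Project} other {Projects}}"
  }
}
```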
### Adding new languages
Adding a new language involves several steps to ensure it integrates seamlessly with the project. Follow these instructions carefully:
1. **Update type definitions**
Add the new language to the TLanguage type in the language definitions file:
```typescript
// types/language.ts
export type TLanguage = "en" | "fr" | "your-lang";
```
2. **Add language configuration**
Include the new language in the list of supported languages:
```typescript
// constants/language.ts
export const SUPPORTED_LANGUAGES: ILanguageOption[] = [
{ label: "English", value: "en" },
{ label: "Your Language", value: "your-lang" }
];
```
3. **Create translation files**
1. Create a new folder for your language under locales (e.g., `locales/your-lang/`).
2. Add a `translations.json` file inside the folder.
3. Copy the structure from an existing translation file and translate all keys.
4. **Update import logic**
Modify the language import logic to include your new language:
```typescript
private importLanguageFile(language: TLanguage): Promise<any> {
switch (language) {
case "your-lang":
return import("../locales/your-lang/translations.json");
// ...
}
}
```
### Quality checklist
Before submitting your contribution, please ensure the following:
- All translation keys exist in every language file.
- Nested structures match across all language files.
- ICU message formats are correctly implemented.
- All languages load without errors in the application.
- Dynamic values and pluralization work as expected.
- There are no missing or untranslated keys.
#### Pro tips
- When in doubt, refer to the English translations for context.
- Verify pluralization works with different numbers.
- Ensure dynamic values (e.g., `{name}`) are correctly interpolated (see the sketch after this list).
- Double-check that nested key access paths are accurate.
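To double-check the last two tips, a small standalone script against [IntlMessageFormat](https://formatjs.github.io/docs/intl-messageformat/) can confirm that pluralization and interpolation behave as expected before strings are copied into the language files. This is only a sketch: the `intl-messageformat` package import and the sample messages below are assumptions for illustration, not strings taken from the codebase.
```typescript
import { IntlMessageFormat } from "intl-messageformat";

// Sample messages mirroring the formats shown earlier in this guide.
const greeting = "Hello, {name}!";
const items = "{count, plural, one {Work item} other {Work items}}";

// Variable interpolation: prints "Hello, Ada!"
console.log(new IntlMessageFormat(greeting, "en").format({ name: "Ada" }));

// Pluralization with different numbers: prints "Work item", then "Work items"
console.log(new IntlMessageFormat(items, "en").format({ count: 1 }));
console.log(new IntlMessageFormat(items, "en").format({ count: 3 }));
```
If either call throws or prints the wrong plural form, fix the message format before adding it to the translation files.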
Happy translating! 🌍✨
## Need help? Questions and suggestions
Questions, suggestions, and thoughts are most welcome. We can also be reached in our [Discord Server](https://discord.com/invite/A92xrEGCge).

124
Dockerfile Normal file
View File

@@ -0,0 +1,124 @@
FROM node:18-alpine AS builder
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
ENV NEXT_PUBLIC_API_BASE_URL=http://NEXT_PUBLIC_API_BASE_URL_PLACEHOLDER
RUN yarn global add turbo
RUN apk add tree
COPY . .
RUN turbo prune --scope=app --scope=plane-deploy --docker
CMD tree -I node_modules/
# Add lockfile and package.json's of isolated subworkspace
FROM node:18-alpine AS installer
RUN apk add --no-cache libc6-compat
WORKDIR /app
ARG NEXT_PUBLIC_API_BASE_URL=http://localhost:8000
# First install the dependencies (as they change less often)
COPY .gitignore .gitignore
COPY --from=builder /app/out/json/ .
COPY --from=builder /app/out/yarn.lock ./yarn.lock
RUN yarn install
# # Build the project
COPY --from=builder /app/out/full/ .
COPY turbo.json turbo.json
COPY replace-env-vars.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/replace-env-vars.sh
RUN yarn turbo run build
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL \
BUILT_NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
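# Next.js inlines NEXT_PUBLIC_* values at build time; replace-env-vars.sh below swaps the
# placeholder URL baked into the build output for the configured NEXT_PUBLIC_API_BASE_URL.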
RUN /usr/local/bin/replace-env-vars.sh http://NEXT_PUBLIC_WEBAPP_URL_PLACEHOLDER ${NEXT_PUBLIC_API_BASE_URL}
FROM python:3.11.1-alpine3.17 AS backend
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
WORKDIR /code
RUN apk --no-cache add \
"libpq~=15" \
"libxslt~=1.1" \
"nodejs-current~=19" \
"xmlsec~=1.2" \
"nginx" \
"nodejs" \
"npm" \
"supervisor"
COPY apiserver/requirements.txt ./
COPY apiserver/requirements ./requirements
RUN apk add --no-cache libffi-dev
RUN apk add --no-cache --virtual .build-deps \
"bash~=5.2" \
"g++~=12.2" \
"gcc~=12.2" \
"cargo~=1.64" \
"git~=2" \
"make~=4.3" \
"postgresql13-dev~=13" \
"libc-dev" \
"linux-headers" \
&& \
pip install -r requirements.txt --compile --no-cache-dir \
&& \
apk del .build-deps
# Add in Django deps and generate Django's static files
COPY apiserver/manage.py manage.py
COPY apiserver/plane plane/
COPY apiserver/templates templates/
RUN apk --no-cache add "bash~=5.2"
COPY apiserver/bin ./bin/
RUN chmod +x ./bin/takeoff ./bin/worker
RUN chmod -R 777 /code
# Expose container port and run entry point script
WORKDIR /app
COPY --from=installer /app/apps/app/next.config.js .
COPY --from=installer /app/apps/app/package.json .
COPY --from=installer /app/apps/space/next.config.js .
COPY --from=installer /app/apps/space/package.json .
COPY --from=installer /app/apps/app/.next/standalone ./
COPY --from=installer /app/apps/app/.next/static ./apps/app/.next/static
COPY --from=installer /app/apps/space/.next/standalone ./
COPY --from=installer /app/apps/space/.next ./apps/space/.next
ENV NEXT_TELEMETRY_DISABLED 1
# RUN rm /etc/nginx/conf.d/default.conf
#######################################################################
COPY nginx/nginx-single-docker-image.conf /etc/nginx/http.d/default.conf
#######################################################################
COPY nginx/supervisor.conf /code/supervisor.conf
ARG NEXT_PUBLIC_API_BASE_URL=http://localhost:8000
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL \
BUILT_NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
COPY replace-env-vars.sh /usr/local/bin/
COPY start.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/replace-env-vars.sh
RUN chmod +x /usr/local/bin/start.sh
EXPOSE 80
CMD ["supervisord","-c","/code/supervisor.conf"]

130
ENV_SETUP.md Normal file
View File

@@ -0,0 +1,130 @@
# Environment Variables
Environment variables are distributed across various files. Please refer to them carefully.
## {PROJECT_FOLDER}/.env
File is available in the project root folder
```
# Database Settings
PGUSER="plane"
PGPASSWORD="plane"
PGHOST="plane-db"
PGDATABASE="plane"
DATABASE_URL=postgresql://${PGUSER}:${PGPASSWORD}@${PGHOST}/${PGDATABASE}
# Redis Settings
REDIS_HOST="plane-redis"
REDIS_PORT="6379"
REDIS_URL="redis://${REDIS_HOST}:6379/"
# AWS Settings
AWS_REGION=""
AWS_ACCESS_KEY_ID="access-key"
AWS_SECRET_ACCESS_KEY="secret-key"
AWS_S3_ENDPOINT_URL="http://plane-minio:9000"
# Changing this requires change in the nginx.conf for uploads if using minio setup
AWS_S3_BUCKET_NAME="uploads"
# Maximum file upload limit
FILE_SIZE_LIMIT=5242880
# GPT settings
OPENAI_API_BASE="https://api.openai.com/v1" # deprecated
OPENAI_API_KEY="sk-" # deprecated
GPT_ENGINE="gpt-3.5-turbo" # deprecated
# set to 1 If using the pre-configured minio setup
USE_MINIO=1
# Nginx Configuration
NGINX_PORT=80
```
## {PROJECT_FOLDER}/web/.env.example
```
# Public boards deploy URL
NEXT_PUBLIC_DEPLOY_URL="http://localhost/spaces"
```
## {PROJECT_FOLDER}/apiserver/.env
```
# Backend
# Debug value for api server use it as 0 for production use
DEBUG=0
# Error logs
SENTRY_DSN=""
# Database Settings
PGUSER="plane"
PGPASSWORD="plane"
PGHOST="plane-db"
PGDATABASE="plane"
DATABASE_URL=postgresql://${PGUSER}:${PGPASSWORD}@${PGHOST}/${PGDATABASE}
# Redis Settings
REDIS_HOST="plane-redis"
REDIS_PORT="6379"
REDIS_URL="redis://${REDIS_HOST}:6379/"
# Email Settings
EMAIL_HOST=""
EMAIL_HOST_USER=""
EMAIL_HOST_PASSWORD=""
EMAIL_PORT=587
EMAIL_FROM="Team Plane <team@mailer.plane.so>"
EMAIL_USE_TLS="1"
EMAIL_USE_SSL="0"
# AWS Settings
AWS_REGION=""
AWS_ACCESS_KEY_ID="access-key"
AWS_SECRET_ACCESS_KEY="secret-key"
AWS_S3_ENDPOINT_URL="http://plane-minio:9000"
# Changing this requires change in the nginx.conf for uploads if using minio setup
AWS_S3_BUCKET_NAME="uploads"
# Maximum file upload limit
FILE_SIZE_LIMIT=5242880
# GPT settings
OPENAI_API_BASE="https://api.openai.com/v1" # deprecated
OPENAI_API_KEY="sk-" # deprecated
GPT_ENGINE="gpt-3.5-turbo" # deprecated
# Settings related to Docker
DOCKERIZED=1 # Deprecated
# Github
GITHUB_CLIENT_SECRET="" # For fetching release notes
# set to 1 If using the pre-configured minio setup
USE_MINIO=1
# Nginx Configuration
NGINX_PORT=80
# SignUps
ENABLE_SIGNUP="1"
# Email Redirection URL
WEB_URL="http://localhost"
```
## Updates
- The environment variable NEXT_PUBLIC_API_BASE_URL has been removed from both the web and space projects.
- The naming convention for containers and images has been updated.
- The plane-worker image will no longer be maintained, as it has been merged with plane-backend.
- The Tiptap pro-extension dependency has been removed, eliminating the need for Tiptap API keys.
- The image name for Plane deployment has been changed to plane-space.

141
README.md
View File

@@ -5,8 +5,9 @@
<img src="https://plane-marketing.s3.ap-south-1.amazonaws.com/plane-readme/plane_logo_.webp" alt="Plane Logo" width="70">
</a>
</p>
<h1 align="center"><b>Plane</b></h1>
<p align="center"><b>Open-source project management that unlocks customer value</b></p>
<h3 align="center"><b>Plane</b></h3>
<p align="center"><b>Open-source project management that unlocks customer value.</b></p>
<p align="center">
<a href="https://discord.com/invite/A92xrEGCge">
@@ -16,10 +17,10 @@
</p>
<p align="center">
<a href="https://plane.so/"><b>Website</b></a>
<a href="https://github.com/makeplane/plane/releases"><b>Releases</b></a>
<a href="https://twitter.com/planepowers"><b>Twitter</b></a>
<a href="https://docs.plane.so/"><b>Documentation</b></a>
<a href="https://dub.sh/plane-website-readme"><b>Website</b></a>
<a href="https://git.new/releases"><b>Releases</b></a>
<a href="https://dub.sh/planepowershq"><b>Twitter</b></a>
<a href="https://dub.sh/planedocs"><b>Documentation</b></a>
</p>
<p>
@@ -39,58 +40,83 @@
</a>
</p>
Meet [Plane](https://plane.so/), an open-source project management tool to track issues, run ~sprints~ cycles, and manage product roadmaps without the chaos of managing the tool itself. 🧘‍♀️
Meet [Plane](https://dub.sh/plane-website-readme). An open-source software development tool to manage issues, sprints, and product roadmaps with peace of mind. 🧘‍♀️
> Plane is evolving every day. Your suggestions, ideas, and reported bugs help us immensely. Do not hesitate to join in the conversation on [Discord](https://discord.com/invite/A92xrEGCge) or raise a GitHub issue. We read everything and respond to most.
> Plane is still in its early days, not everything will be perfect yet, and hiccups may happen. Please let us know of any suggestions, ideas, or bugs that you encounter on our [Discord](https://discord.com/invite/A92xrEGCge) or GitHub issues, and we will use your feedback to improve in our upcoming releases.
## 🚀 Installation
## Installation
Getting started with Plane is simple. Choose the setup that works best for you:
The easiest way to get started with Plane is by creating a [Plane Cloud](https://app.plane.so) account where we offer a hosted solution for users.
- **Plane Cloud**
Sign up for a free account on [Plane Cloud](https://app.plane.so)—it's the fastest way to get up and running without worrying about infrastructure.
If you want more control over your data or prefer to self-host Plane, please refer to our [deployment documentation](https://docs.plane.so/docker-compose).
- **Self-host Plane**
Prefer full control over your data and infrastructure? Install and run Plane on your own servers. Follow our detailed [deployment guides](https://developers.plane.so/self-hosting/overview) to get started.
| Installation methods | Docs link |
| Installation Methods | Documentation Link |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Docker | [![Docker](https://img.shields.io/badge/docker-%230db7ed.svg?style=for-the-badge&logo=docker&logoColor=white)](https://developers.plane.so/self-hosting/methods/docker-compose) |
| Kubernetes | [![Kubernetes](https://img.shields.io/badge/kubernetes-%23326ce5.svg?style=for-the-badge&logo=kubernetes&logoColor=white)](https://developers.plane.so/self-hosting/methods/kubernetes) |
| Docker | [![Docker](https://img.shields.io/badge/docker-%230db7ed.svg?style=for-the-badge&logo=docker&logoColor=white)](https://docs.plane.so/self-hosting/methods/docker-compose) |
| Kubernetes | [![Kubernetes](https://img.shields.io/badge/kubernetes-%23326ce5.svg?style=for-the-badge&logo=kubernetes&logoColor=white)](https://docs.plane.so/kubernetes) |
`Instance admins` can configure instance settings with [God mode](https://developers.plane.so/self-hosting/govern/instance-admin).
`Instance admin` can configure instance settings using our [God-mode](https://docs.plane.so/instance-admin) feature.
## 🌟 Features
## 🚀 Features
- **Issues**
Efficiently create and manage tasks with a robust rich text editor that supports file uploads. Enhance organization and tracking by adding sub-properties and referencing related issues.
- **Issues**: Quickly create issues and add details using a powerful rich text editor that supports file uploads. Add sub-properties and references to problems for better organization and tracking.
- **Cycles**
Maintain your teams momentum with Cycles. Track progress effortlessly using burn-down charts and other insightful tools.
- **Cycles**:
Keep up your team's momentum with Cycles. Gain insights into your project's progress with burn-down charts and other valuable features.
- **Modules**
Simplify complex projects by dividing them into smaller, manageable modules.
- **Modules**: Break down your large projects into smaller, more manageable modules. Assign modules between teams to track and plan your project's progress easily.
- **Views**
Customize your workflow by creating filters to display only the most relevant issues. Save and share these views with ease.
- **Views**: Create custom filters to display only the issues that matter to you. Save and share your filters in just a few clicks.
- **Pages**
Capture and organize ideas using Plane Pages, complete with AI capabilities and a rich text editor. Format text, insert images, add hyperlinks, or convert your notes into actionable items.
- **Pages**: Plane pages, equipped with AI and a rich text editor, let you jot down your thoughts on the fly. Format your text, upload images, hyperlink, or sync your existing ideas into an actionable item or issue.
- **Analytics**
Access real-time insights across all your Plane data. Visualize trends, remove blockers, and keep your projects moving forward.
- **Analytics**: Get insights into all your Plane data in real-time. Visualize issue data to spot trends, remove blockers, and progress your work.
- **Drive** (_coming soon_): Drive lets you share documents, images, videos, or any other files with your team and align on the problem/solution.
## 🛠️ Quick start for contributors
## 🛠️ Local development
> The development system must have Docker Engine installed and running.
See [CONTRIBUTING](./CONTRIBUTING.md)
Setting up the local environment is easy and straightforward. Follow the steps below and you will be ready to contribute:
## ⚙️ Built with
[![Next JS](https://img.shields.io/badge/next.js-000000?style=for-the-badge&logo=nextdotjs&logoColor=white)](https://nextjs.org/)
[![Django](https://img.shields.io/badge/Django-092E20?style=for-the-badge&logo=django&logoColor=green)](https://www.djangoproject.com/)
[![Node JS](https://img.shields.io/badge/node.js-339933?style=for-the-badge&logo=Node.js&logoColor=white)](https://nodejs.org/en)
1. Clone the code locally using:
```
git clone https://github.com/makeplane/plane.git
```
2. Switch to the code folder:
```
cd plane
```
3. Create your feature or fix branch you plan to work on using:
```
git checkout -b <feature-branch-name>
```
4. Open terminal and run:
```
./setup.sh
```
5. Open the code on VSCode or similar equivalent IDE.
6. Review the `.env` files available in various folders.
Visit [Environment Setup](./ENV_SETUP.md) to know about various environment variables used in system.
7. Run the docker command to initiate services:
```
docker compose -f docker-compose-local.yml up -d
```
You are ready to make changes to the code. Do not forget to refresh the browser (in case it does not auto-reload).
That's it!
## ❤️ Community
The Plane community can be found on [GitHub Discussions](https://github.com/orgs/makeplane/discussions), and our [Discord server](https://discord.com/invite/A92xrEGCge). Our [Code of conduct](https://github.com/makeplane/plane/blob/master/CODE_OF_CONDUCT.md) applies to all Plane community channels.
Ask questions, report bugs, join discussions, voice ideas, make feature requests, or share your projects.
### Repo Activity
![Plane Repo Activity](https://repobeats.axiom.co/api/embed/2523c6ed2f77c082b7908c33e2ab208981d76c39.svg "Repobeats analytics image")
## 📸 Screenshots
@@ -139,7 +165,7 @@ See [CONTRIBUTING](./CONTRIBUTING.md)
</a>
</p>
</p>
<p>
<p>
<a href="https://plane.so" target="_blank">
<img
src="https://ik.imagekit.io/w2okwbtu2/Drive_LlfeY4xn3.png?updatedAt=1709298837917"
@@ -150,42 +176,23 @@ See [CONTRIBUTING](./CONTRIBUTING.md)
</p>
</p>
## 📝 Documentation
Explore Plane's [product documentation](https://docs.plane.so/) and [developer documentation](https://developers.plane.so/) to learn about features, setup, and usage.
## ⛓️ Security
## ❤️ Community
If you believe you have found a security vulnerability in Plane, we encourage you to responsibly disclose this and not open a public issue. We will investigate all legitimate reports.
Join the Plane community on [GitHub Discussions](https://github.com/orgs/makeplane/discussions) and our [Discord server](https://discord.com/invite/A92xrEGCge). We follow a [Code of conduct](https://github.com/makeplane/plane/blob/master/CODE_OF_CONDUCT.md) in all our community channels.
Email squawk@plane.so to disclose any security vulnerabilities.
Feel free to ask questions, report bugs, participate in discussions, share ideas, request features, or showcase your projects. We'd love to hear from you!
## ❤️ Contribute
## 🛡️ Security
There are many ways to contribute to Plane, including:
If you discover a security vulnerability in Plane, please report it responsibly instead of opening a public issue. We take all legitimate reports seriously and will investigate them promptly. See [Security policy](https://github.com/makeplane/plane/blob/master/SECURITY.md) for more info.
To disclose any security issues, please email us at security@plane.so.
## 🤝 Contributing
There are many ways you can contribute to Plane:
- Report [bugs](https://github.com/makeplane/plane/issues/new?assignees=srinivaspendem%2Cpushya22&labels=%F0%9F%90%9Bbug&projects=&template=--bug-report.yaml&title=%5Bbug%5D%3A+) or submit [feature requests](https://github.com/makeplane/plane/issues/new?assignees=srinivaspendem%2Cpushya22&labels=%E2%9C%A8feature&projects=&template=--feature-request.yaml&title=%5Bfeature%5D%3A+).
- Review the [documentation](https://docs.plane.so/) and submit [pull requests](https://github.com/makeplane/docs) to improve it—whether it's fixing typos or adding new content.
- Talk or write about Plane or any other ecosystem integration and [let us know](https://discord.com/invite/A92xrEGCge)!
- Show your support by upvoting [popular feature requests](https://github.com/makeplane/plane/issues).
Please read [CONTRIBUTING.md](https://github.com/makeplane/plane/blob/master/CONTRIBUTING.md) for details on the process for submitting pull requests to us.
### Repo activity
![Plane Repo Activity](https://repobeats.axiom.co/api/embed/2523c6ed2f77c082b7908c33e2ab208981d76c39.svg "Repobeats analytics image")
- Submitting [bugs](https://github.com/makeplane/plane/issues/new?assignees=srinivaspendem%2Cpushya22&labels=%F0%9F%90%9Bbug&projects=&template=--bug-report.yaml&title=%5Bbug%5D%3A+) and [feature requests](https://github.com/makeplane/plane/issues/new?assignees=srinivaspendem%2Cpushya22&labels=%E2%9C%A8feature&projects=&template=--feature-request.yaml&title=%5Bfeature%5D%3A+) for various components.
- Reviewing [the documentation](https://docs.plane.so/) and submitting [pull requests](https://github.com/makeplane/plane), from fixing typos to adding new features.
- Speaking or writing about Plane or any other ecosystem integration and [letting us know](https://discord.com/invite/A92xrEGCge)!
- Upvoting [popular feature requests](https://github.com/makeplane/plane/issues) to show your support.
### We couldn't have done this without you.
<a href="https://github.com/makeplane/plane/graphs/contributors">
<img src="https://contrib.rocks/image?repo=makeplane/plane" />
</a>
## License
This project is licensed under the [GNU Affero General Public License v3.0](https://github.com/makeplane/plane/blob/master/LICENSE.txt).

View File

@@ -1,39 +1,44 @@
# Security policy
This document outlines the security protocols and vulnerability reporting guidelines for the Plane project. Ensuring the security of our systems is a top priority, and while we work diligently to maintain robust protection, vulnerabilities may still occur. We highly value the community's role in identifying and reporting security concerns to uphold the integrity of our systems and safeguard our users.
# Security Policy
## Reporting a vulnerability
If you have identified a security vulnerability, submit your findings to [security@plane.so](mailto:security@plane.so).
Ensure your report includes all relevant information needed for us to reproduce and assess the issue. Include the IP address or URL of the affected system.
This document outlines security procedures and vulnerabilities reporting for the Plane project.
To ensure a responsible and effective disclosure process, please adhere to the following:
At Plane, we treat safeguarding the security of our systems as a top priority. Despite our efforts, vulnerabilities may still exist. We greatly appreciate your assistance in identifying and reporting any such vulnerabilities to help us maintain the integrity of our systems and protect our clients.
- Maintain confidentiality and refrain from publicly disclosing the vulnerability until we have had the opportunity to investigate and address the issue.
- Refrain from running automated vulnerability scans on our infrastructure or dashboard without prior consent. Contact us to set up a sandbox environment if necessary.
- Do not exploit any discovered vulnerabilities for malicious purposes, such as accessing or altering user data.
- Do not engage in physical security attacks, social engineering, distributed denial of service (DDoS) attacks, spam campaigns, or attacks on third-party applications as part of your vulnerability testing.
To report a security vulnerability, please email us directly at security@plane.so with a detailed description of the vulnerability and steps to reproduce it. Please refrain from disclosing the vulnerability publicly until we have had an opportunity to review and address it.
## Out of scope
While we appreciate all efforts to assist in improving our security, please note that the following types of vulnerabilities are considered out of scope:
## Out of Scope Vulnerabilities
- Vulnerabilities requiring man-in-the-middle (MITM) attacks or physical access to a user's device.
- Content spoofing or text injection issues without a clear attack vector or the ability to modify HTML/CSS.
- Issues related to email spoofing.
- Missing DNSSEC, CAA, or CSP headers.
- Absence of secure or HTTP-only flags on non-sensitive cookies.
We appreciate your help in identifying vulnerabilities. However, please note that the following types of vulnerabilities are considered out of scope:
## Our commitment
- Attacks requiring MITM or physical access to a user's device.
- Content spoofing and text injection issues without demonstrating an attack vector or ability to modify HTML/CSS.
- Email spoofing.
- Missing DNSSEC, CAA, CSP headers.
- Lack of Secure or HTTP only flag on non-sensitive cookies.
At Plane, we are committed to maintaining transparent and collaborative communication throughout the vulnerability resolution process. Here's what you can expect from us:
## Reporting Process
- **Response Time** <br/>
We will acknowledge receipt of your vulnerability report within three business days and provide an estimated timeline for resolution.
- **Legal Protection** <br/>
We will not initiate legal action against you for reporting vulnerabilities, provided you adhere to the reporting guidelines.
- **Confidentiality** <br/>
Your report will be treated with confidentiality. We will not disclose your personal information to third parties without your consent.
- **Recognition** <br/>
With your permission, we are happy to publicly acknowledge your contribution to improving our security once the issue is resolved.
- **Timely Resolution** <br/>
We are committed to working closely with you throughout the resolution process, providing timely updates as necessary. Our goal is to address all reported vulnerabilities swiftly, and we will actively engage with you to coordinate a responsible disclosure once the issue is fully resolved.
If you discover a vulnerability, please adhere to the following reporting process:
We appreciate your help in ensuring the security of our platform. Your contributions are crucial to protecting our users and maintaining a secure environment. Thank you for working with us to keep Plane safe.
1. Email your findings to security@plane.so.
2. Refrain from running automated scanners on our infrastructure or dashboard without prior consent. Contact us to set up a sandbox environment if necessary.
3. Do not exploit the vulnerability for malicious purposes, such as downloading excessive data or altering user data.
4. Maintain confidentiality and refrain from disclosing the vulnerability until it has been resolved.
5. Avoid using physical security attacks, social engineering, distributed denial of service, spam, or third-party applications.
When reporting a vulnerability, please provide sufficient information to allow us to reproduce and address the issue promptly. Include the IP address or URL of the affected system, along with a detailed description of the vulnerability.
## Our Commitment
We are committed to promptly addressing reported vulnerabilities and maintaining open communication throughout the resolution process. Here's what you can expect from us:
- **Response Time:** We will acknowledge receipt of your report within three business days and provide an expected resolution date.
- **Legal Protection:** We will not pursue legal action against you for reporting vulnerabilities, provided you adhere to the reporting guidelines.
- **Confidentiality:** Your report will be treated with strict confidentiality. We will not disclose your personal information to third parties without your consent.
- **Progress Updates:** We will keep you informed of our progress in resolving the reported vulnerability.
- **Recognition:** With your permission, we will publicly acknowledge you as the discoverer of the vulnerability.
- **Timely Resolution:** We strive to resolve all reported vulnerabilities promptly and will actively participate in the publication process once the issue is resolved.
We appreciate your cooperation in helping us maintain the security of our systems and protecting our clients. Thank you for your contributions to our security efforts.
reference: https://supabase.com/.well-known/security.txt

View File

@@ -1,182 +0,0 @@
ARG BASE_TAG=develop
ARG BUILD_TYPE=full
# *****************************************************************************
# STAGE 1: Build the project
# *****************************************************************************
FROM node:18-alpine AS builder
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
RUN yarn global add turbo
COPY . .
RUN turbo prune --scope=web --scope=space --scope=admin --docker
# *****************************************************************************
# STAGE 2: Install dependencies & build the project
# *****************************************************************************
# Add lockfile and package.json's of isolated subworkspace
FROM node:18-alpine AS installer
RUN apk add --no-cache libc6-compat
WORKDIR /app
# First install the dependencies (as they change less often)
COPY .gitignore .gitignore
COPY --from=builder /app/out/json/ .
COPY --from=builder /app/out/yarn.lock ./yarn.lock
RUN yarn install
# # Build the project
COPY --from=builder /app/out/full/ .
COPY turbo.json turbo.json
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
RUN yarn turbo run build --filter=web --filter=space --filter=admin
# *****************************************************************************
# STAGE 3: Copy the project and start it
# *****************************************************************************
FROM makeplane/plane-aio-base:${BUILD_TYPE}-${BASE_TAG} AS runner
WORKDIR /app
SHELL [ "/bin/bash", "-c" ]
# PYTHON APPLICATION SETUP
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
COPY apps/api/requirements.txt ./api/
COPY apps/api/requirements ./api/requirements
RUN pip install -r ./api/requirements.txt --compile --no-cache-dir
# Add in Django deps and generate Django's static files
COPY apps/api/manage.py ./api/manage.py
COPY apps/api/plane ./api/plane/
COPY apps/api/templates ./api/templates/
COPY package.json ./api/package.json
COPY apps/api/bin ./api/bin/
RUN chmod +x ./api/bin/*
RUN chmod -R 777 ./api/
# NEXTJS BUILDS
COPY --from=installer /app/web/next.config.js ./web/
COPY --from=installer /app/web/package.json ./web/
COPY --from=installer /app/web/.next/standalone ./web
COPY --from=installer /app/web/.next/static ./web/web/.next/static
COPY --from=installer /app/web/public ./web/web/public
COPY --from=installer /app/space/next.config.js ./space/
COPY --from=installer /app/space/package.json ./space/
COPY --from=installer /app/space/.next/standalone ./space
COPY --from=installer /app/space/.next/static ./space/space/.next/static
COPY --from=installer /app/space/public ./space/space/public
COPY --from=installer /app/admin/next.config.js ./admin/
COPY --from=installer /app/admin/package.json ./admin/
COPY --from=installer /app/admin/.next/standalone ./admin
COPY --from=installer /app/admin/.next/static ./admin/admin/.next/static
COPY --from=installer /app/admin/public ./admin/admin/public
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
ARG BUILD_TYPE=full
ENV BUILD_TYPE=$BUILD_TYPE
COPY aio/supervisord-${BUILD_TYPE}-base /app/supervisord.conf
COPY aio/supervisord-app /app/supervisord-app
RUN cat /app/supervisord-app >> /app/supervisord.conf && \
rm /app/supervisord-app
COPY ./aio/nginx.conf /etc/nginx/nginx.conf.template
# if build type is full, run the below copy pg-setup.sh
COPY aio/postgresql.conf /etc/postgresql/postgresql.conf
COPY aio/pg-setup.sh /app/pg-setup.sh
RUN chmod +x /app/pg-setup.sh
# *****************************************************************************
# APPLICATION ENVIRONMENT SETTINGS
# *****************************************************************************
ENV APP_DOMAIN=localhost
ENV WEB_URL=http://${APP_DOMAIN}
ENV DEBUG=0
ENV CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN},https://${APP_DOMAIN}
# Secret Key
ENV SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5
# Gunicorn Workers
ENV GUNICORN_WORKERS=1
ENV POSTGRES_USER="plane"
ENV POSTGRES_PASSWORD="plane"
ENV POSTGRES_DB="plane"
ENV POSTGRES_HOST="localhost"
ENV POSTGRES_PORT="5432"
ENV DATABASE_URL="postgresql://plane:plane@localhost:5432/plane"
ENV REDIS_HOST="localhost"
ENV REDIS_PORT="6379"
ENV REDIS_URL="redis://localhost:6379"
ENV USE_MINIO="1"
ENV AWS_REGION=""
ENV AWS_ACCESS_KEY_ID="access-key"
ENV AWS_SECRET_ACCESS_KEY="secret-key"
ENV AWS_S3_ENDPOINT_URL="http://localhost:9000"
ENV AWS_S3_BUCKET_NAME="uploads"
ENV MINIO_ROOT_USER="access-key"
ENV MINIO_ROOT_PASSWORD="secret-key"
ENV BUCKET_NAME="uploads"
ENV FILE_SIZE_LIMIT="5242880"
# *****************************************************************************
RUN /app/pg-setup.sh
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]

View File

@@ -1,73 +0,0 @@
FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt
FROM python:3.12-slim
# Set environment variables to non-interactive for apt
ENV DEBIAN_FRONTEND=noninteractive
ENV BUILD_TYPE=full
SHELL [ "/bin/bash", "-c" ]
WORKDIR /app
RUN mkdir -p /app/{data,logs} && \
mkdir -p /app/data/{redis,pg,minio,nginx} && \
mkdir -p /app/logs/{access,error} && \
mkdir -p /etc/supervisor/conf.d
# Update the package list and install prerequisites
RUN apt-get update && \
apt-get install -y \
gnupg2 curl ca-certificates lsb-release software-properties-common \
build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \
tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu \
sudo lsof net-tools libpq-dev procps gettext
# Install Redis 7.2
RUN echo "deb http://deb.debian.org/debian $(lsb_release -cs)-backports main" > /etc/apt/sources.list.d/backports.list && \
curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" > /etc/apt/sources.list.d/redis.list && \
apt-get update && \
apt-get install -y redis-server
# Install PostgreSQL 15
ENV POSTGRES_VERSION=15
RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/pgdg-archive-keyring.gpg && \
echo "deb [signed-by=/usr/share/keyrings/pgdg-archive-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
apt-get install -y postgresql-$POSTGRES_VERSION postgresql-client-$POSTGRES_VERSION && \
mkdir -p /var/lib/postgresql/data && \
chown -R postgres:postgres /var/lib/postgresql
COPY postgresql.conf /etc/postgresql/postgresql.conf
RUN sudo -u postgres /usr/lib/postgresql/$POSTGRES_VERSION/bin/initdb -D /var/lib/postgresql/data
# Install MinIO
ARG TARGETARCH
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -fSl https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/local/bin/minio; \
elif [ "$TARGETARCH" = "arm64" ]; then \
curl -fSl https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/local/bin/minio; \
else \
echo "Unsupported architecture: $TARGETARCH"; exit 1; \
fi && \
chmod +x /usr/local/bin/minio
# Install Node.js 18
RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \
apt-get install -y nodejs && \
python -m pip install --upgrade pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create Supervisor configuration file
COPY supervisord-full-base /app/supervisord.conf
COPY nginx.conf /etc/nginx/nginx.conf.template
COPY env.sh /app/nginx-start.sh
RUN chmod +x /app/nginx-start.sh
# Expose ports for Redis, PostgreSQL, and MinIO
EXPOSE 6379 5432 9000 80 443
# Start Supervisor
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]

View File

@@ -1,45 +0,0 @@
FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt
FROM python:3.12-slim
# Set environment variables to non-interactive for apt
ENV DEBIAN_FRONTEND=noninteractive
ENV BUILD_TYPE=slim
SHELL [ "/bin/bash", "-c" ]
WORKDIR /app
RUN mkdir -p /app/{data,logs} && \
mkdir -p /app/data/nginx && \
mkdir -p /app/logs/{access,error} && \
mkdir -p /etc/supervisor/conf.d
# Update the package list and install prerequisites
RUN apt-get update && \
apt-get install -y \
gnupg2 curl ca-certificates lsb-release software-properties-common \
build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \
tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu \
sudo lsof net-tools libpq-dev procps gettext
# Install Node.js 18
RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \
apt-get install -y nodejs
RUN python -m pip install --upgrade pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create Supervisor configuration file
COPY supervisord-slim-base /app/supervisord.conf
COPY nginx.conf /etc/nginx/nginx.conf.template
COPY env.sh /app/nginx-start.sh
RUN chmod +x /app/nginx-start.sh
# Expose HTTP and HTTPS ports
EXPOSE 80 443
# Start Supervisor
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]

View File

@@ -1,7 +0,0 @@
#!/bin/bash
export dollar="$"
export http_upgrade="http_upgrade"
export scheme="scheme"
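# The nginx template writes nginx's own runtime variables as ${dollar}name so envsubst does not
# swallow them; exporting dollar='$' turns them back into $name in the rendered config, while real
# settings such as ${FILE_SIZE_LIMIT} and ${BUCKET_NAME} are substituted from the environment.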
envsubst < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
exec nginx -g 'daemon off;'

View File

@@ -1,72 +0,0 @@
events {
}
http {
sendfile on;
server {
listen 80;
root /www/data/;
access_log /var/log/nginx/access.log;
client_max_body_size ${FILE_SIZE_LIMIT};
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Permissions-Policy "interest-cohort=()" always;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Forwarded-Proto "${dollar}scheme";
add_header X-Forwarded-Host "${dollar}host";
add_header X-Forwarded-For "${dollar}proxy_add_x_forwarded_for";
add_header X-Real-IP "${dollar}remote_addr";
location / {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:3001/;
}
location /spaces/ {
rewrite ^/spaces/?$ /spaces/login break;
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:3002/spaces/;
}
location /god-mode/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:3003/god-mode/;
}
location /api/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:8000/api/;
}
location /auth/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:8000/auth/;
}
location /${BUCKET_NAME}/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://localhost:9000/uploads/;
}
}
}

View File

@@ -1,14 +0,0 @@
#!/bin/bash
if [ "$BUILD_TYPE" == "full" ]; then
export PGHOST=localhost
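# Bring PostgreSQL up just long enough to create the application role and database, then stop it again.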
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data start
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "CREATE USER $POSTGRES_USER WITH SUPERUSER PASSWORD '$POSTGRES_PASSWORD';" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/createdb" -O "$POSTGRES_USER" "$POSTGRES_DB" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data stop
fi

View File

@@ -1,815 +0,0 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/data' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/15/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/15/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/15-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 256MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # limited by max_parallel_workers
#max_parallel_maintenance_workers = 2 # limited by max_parallel_workers
#max_parallel_workers = 8 # number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a logfile segment
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '15/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
# include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here

View File

@@ -1,71 +0,0 @@
[program:web]
command=node /app/web/web/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3001,HOSTNAME=0.0.0.0
[program:space]
command=node /app/space/space/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3002,HOSTNAME=0.0.0.0
[program:admin]
command=node /app/admin/admin/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3003,HOSTNAME=0.0.0.0
[program:migrator]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-migrator.sh"
autostart=true
autorestart=false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:api]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-api.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:worker]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-worker.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:beat]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-beat.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0

View File

@@ -1,38 +0,0 @@
[supervisord]
user=root
nodaemon=true
stderr_logfile=/app/logs/error/supervisor.err.log
stdout_logfile=/app/logs/access/supervisor.log
[program:redis]
directory=/app/data/redis
command=redis-server
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/redis.err.log
stdout_logfile=/app/logs/access/redis.log
[program:postgresql]
user=postgres
command=/usr/lib/postgresql/15/bin/postgres --config-file=/etc/postgresql/postgresql.conf
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/postgresql.err.log
stdout_logfile=/app/logs/access/postgresql.log
[program:minio]
directory=/app/data/minio
command=minio server /app/data/minio
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/minio.err.log
stdout_logfile=/app/logs/access/minio.log
[program:nginx]
directory=/app/data/nginx
command=/app/nginx-start.sh
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/nginx.err.log
stdout_logfile=/app/logs/access/nginx.log

View File

@@ -1,14 +0,0 @@
[supervisord]
user=root
nodaemon=true
stderr_logfile=/app/logs/error/supervisor.err.log
stdout_logfile=/app/logs/access/supervisor.log
[program:nginx]
directory=/app/data/nginx
command=/app/nginx-start.sh
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/nginx.err.log
stdout_logfile=/app/logs/access/nginx.log

46
apiserver/.env.example Normal file
View File

@@ -0,0 +1,46 @@
# Backend
# Debug value for the API server; use 0 in production
DEBUG=0
CORS_ALLOWED_ORIGINS=""
# Error logs
SENTRY_DSN=""
SENTRY_ENVIRONMENT="development"
# Database Settings
POSTGRES_USER="plane"
POSTGRES_PASSWORD="plane"
POSTGRES_HOST="plane-db"
POSTGRES_DB="plane"
DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}/${POSTGRES_DB}
# Redis Settings
REDIS_HOST="plane-redis"
REDIS_PORT="6379"
REDIS_URL="redis://${REDIS_HOST}:6379/"
# AWS Settings
AWS_REGION=""
AWS_ACCESS_KEY_ID="access-key"
AWS_SECRET_ACCESS_KEY="secret-key"
AWS_S3_ENDPOINT_URL="http://plane-minio:9000"
# Changing this requires change in the nginx.conf for uploads if using minio setup
AWS_S3_BUCKET_NAME="uploads"
# Maximum file upload limit
FILE_SIZE_LIMIT=5242880
# Settings related to Docker
DOCKERIZED=1 # deprecated
# set to 1 if using the pre-configured minio setup
USE_MINIO=1
# Nginx Configuration
NGINX_PORT=80
# Email redirections and minio domain settings
WEB_URL="http://localhost"
# Gunicorn Workers
GUNICORN_WORKERS=2

52
apiserver/Dockerfile.api Normal file
View File

@@ -0,0 +1,52 @@
FROM python:3.11.1-alpine3.17 AS backend
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
WORKDIR /code
RUN apk --no-cache add \
"libpq~=15" \
"libxslt~=1.1" \
"nodejs-current~=19" \
"xmlsec~=1.2"
COPY requirements.txt ./
COPY requirements ./requirements
RUN apk add --no-cache libffi-dev
RUN apk add --no-cache --virtual .build-deps \
"bash~=5.2" \
"g++~=12.2" \
"gcc~=12.2" \
"cargo~=1.64" \
"git~=2" \
"make~=4.3" \
"postgresql13-dev~=13" \
"libc-dev" \
"linux-headers" \
&& \
pip install -r requirements.txt --compile --no-cache-dir \
&& \
apk del .build-deps
# Add in Django deps and generate Django's static files
COPY manage.py manage.py
COPY plane plane/
COPY templates templates/
COPY package.json package.json
RUN apk --no-cache add "bash~=5.2"
COPY ./bin ./bin/
RUN mkdir -p /code/plane/logs
RUN chmod +x ./bin/takeoff ./bin/worker ./bin/beat
RUN chmod -R 777 /code
# Expose container port and run entry point script
EXPOSE 8000
# CMD [ "./bin/takeoff" ]

45
apiserver/Dockerfile.dev Normal file
View File

@@ -0,0 +1,45 @@
FROM python:3.11.1-alpine3.17 AS backend
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
RUN apk --no-cache add \
"bash~=5.2" \
"libpq~=15" \
"libxslt~=1.1" \
"nodejs-current~=19" \
"xmlsec~=1.2" \
"libffi-dev" \
"bash~=5.2" \
"g++~=12.2" \
"gcc~=12.2" \
"cargo~=1.64" \
"git~=2" \
"make~=4.3" \
"postgresql13-dev~=13" \
"libc-dev" \
"linux-headers"
WORKDIR /code
COPY requirements.txt ./requirements.txt
ADD requirements ./requirements
# Install the local development settings
RUN pip install -r requirements/local.txt --compile --no-cache-dir
COPY . .
RUN mkdir -p /code/plane/logs
RUN chmod -R +x /code/bin
RUN chmod -R 777 /code
# Expose container port and run entry point script
EXPOSE 8000
CMD [ "./bin/takeoff.local" ]

3
apiserver/Procfile Normal file
View File

@@ -0,0 +1,3 @@
web: gunicorn -w 4 -k uvicorn.workers.UvicornWorker plane.asgi:application --bind 0.0.0.0:$PORT --max-requests 10000 --max-requests-jitter 1000 --access-logfile -
worker: celery -A plane worker -l info
beat: celery -A plane beat -l INFO

238
apiserver/back_migration.py Normal file
View File

@@ -0,0 +1,238 @@
# All the python scripts that are used for back migrations
import uuid
import random
from django.contrib.auth.hashers import make_password
from plane.db.models import ProjectIdentifier
from plane.db.models import (
Issue,
IssueComment,
User,
Project,
ProjectMember,
Label,
Integration,
)
# Update description and description html values for old descriptions
def update_description():
try:
issues = Issue.objects.all()
updated_issues = []
for issue in issues:
issue.description_html = f"<p>{issue.description}</p>"
issue.description_stripped = issue.description
updated_issues.append(issue)
Issue.objects.bulk_update(
updated_issues,
["description_html", "description_stripped"],
batch_size=100,
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_comments():
try:
issue_comments = IssueComment.objects.all()
updated_issue_comments = []
for issue_comment in issue_comments:
issue_comment.comment_html = (
f"<p>{issue_comment.comment_stripped}</p>"
)
updated_issue_comments.append(issue_comment)
IssueComment.objects.bulk_update(
updated_issue_comments, ["comment_html"], batch_size=100
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_project_identifiers():
try:
project_identifiers = ProjectIdentifier.objects.filter(
workspace_id=None
).select_related("project", "project__workspace")
updated_identifiers = []
for identifier in project_identifiers:
identifier.workspace_id = identifier.project.workspace_id
updated_identifiers.append(identifier)
ProjectIdentifier.objects.bulk_update(
updated_identifiers, ["workspace_id"], batch_size=50
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_user_empty_password():
try:
users = User.objects.filter(password="")
updated_users = []
for user in users:
user.password = make_password(uuid.uuid4().hex)
user.is_password_autoset = True
updated_users.append(user)
User.objects.bulk_update(updated_users, ["password"], batch_size=50)
print("Success")
except Exception as e:
print(e)
print("Failed")
def updated_issue_sort_order():
try:
issues = Issue.objects.all()
updated_issues = []
for issue in issues:
issue.sort_order = issue.sequence_id * random.randint(100, 500)
updated_issues.append(issue)
Issue.objects.bulk_update(
updated_issues, ["sort_order"], batch_size=100
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_project_cover_images():
try:
project_cover_images = [
"https://images.unsplash.com/photo-1677432658720-3d84f9d657b4?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1661107564401-57497d8fe86f?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1332&q=80",
"https://images.unsplash.com/photo-1677352241429-dc90cfc7a623?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1332&q=80",
"https://images.unsplash.com/photo-1677196728306-eeafea692454?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1331&q=80",
"https://images.unsplash.com/photo-1660902179734-c94c944f7830?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1255&q=80",
"https://images.unsplash.com/photo-1672243775941-10d763d9adef?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1677040628614-53936ff66632?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1676920410907-8d5f8dd4b5ba?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1332&q=80",
"https://images.unsplash.com/photo-1676846328604-ce831c481346?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1155&q=80",
"https://images.unsplash.com/photo-1676744843212-09b7e64c3a05?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1676798531090-1608bedeac7b?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1597088758740-56fd7ec8a3f0?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1169&q=80",
"https://images.unsplash.com/photo-1676638392418-80aad7c87b96?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=774&q=80",
"https://images.unsplash.com/photo-1649639194967-2fec0b4ea7bc?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1675883086902-b453b3f8146e?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=774&q=80",
"https://images.unsplash.com/photo-1675887057159-40fca28fdc5d?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1173&q=80",
"https://images.unsplash.com/photo-1675373980203-f84c5a672aa5?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1675191475318-d2bf6bad1200?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1332&q=80",
"https://images.unsplash.com/photo-1675456230532-2194d0c4bcc0?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1170&q=80",
"https://images.unsplash.com/photo-1675371788315-60fa0ef48267?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1332&q=80",
]
projects = Project.objects.all()
updated_projects = []
for project in projects:
project.cover_image = project_cover_images[random.randint(0, 19)]
updated_projects.append(project)
Project.objects.bulk_update(
updated_projects, ["cover_image"], batch_size=100
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_user_view_property():
try:
project_members = ProjectMember.objects.all()
updated_project_members = []
for project_member in project_members:
project_member.default_props = {
"filters": {"type": None},
"orderBy": "-created_at",
"collapsed": True,
"issueView": "list",
"filterIssue": None,
"groupByProperty": None,
"showEmptyGroups": True,
}
updated_project_members.append(project_member)
ProjectMember.objects.bulk_update(
updated_project_members, ["default_props"], batch_size=100
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_label_color():
try:
labels = Label.objects.filter(color="")
updated_labels = []
for label in labels:
label.color = f"#{random.randint(0, 0xFFFFFF+1):06X}"
updated_labels.append(label)
Label.objects.bulk_update(updated_labels, ["color"], batch_size=100)
print("Success")
except Exception as e:
print(e)
print("Failed")
def create_slack_integration():
try:
_ = Integration.objects.create(
provider="slack", network=2, title="Slack"
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_integration_verified():
try:
integrations = Integration.objects.all()
updated_integrations = []
for integration in integrations:
integration.verified = True
updated_integrations.append(integration)
Integration.objects.bulk_update(
updated_integrations, ["verified"], batch_size=10
)
print("Success")
except Exception as e:
print(e)
print("Failed")
def update_start_date():
try:
issues = Issue.objects.filter(
state__group__in=["started", "completed"]
)
updated_issues = []
for issue in issues:
issue.start_date = issue.created_at.date()
updated_issues.append(issue)
Issue.objects.bulk_update(
updated_issues, ["start_date"], batch_size=500
)
print("Success")
except Exception as e:
print(e)
print("Failed")

35
apiserver/bin/takeoff.local Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
set -e
python manage.py wait_for_db
# Wait for migrations
python manage.py wait_for_migrations
# Collect system information
HOSTNAME=$(hostname)
MAC_ADDRESS=$(ip link show | awk '/ether/ {print $2}' | head -n 1)
CPU_INFO=$(cat /proc/cpuinfo)
MEMORY_INFO=$(free -h)
DISK_INFO=$(df -h)
# Concatenate information and compute SHA-256 hash
SIGNATURE=$(echo "$HOSTNAME$MAC_ADDRESS$CPU_INFO$MEMORY_INFO$DISK_INFO" | sha256sum | awk '{print $1}')
# Export the variables
export MACHINE_SIGNATURE=$SIGNATURE
# Register instance
python manage.py register_instance "$MACHINE_SIGNATURE"
# Load the configuration variable
python manage.py configure_instance
# Create the default bucket
python manage.py create_bucket
# Clear Cache before starting to remove stale values
python manage.py clear_cache
python manage.py runserver 0.0.0.0:8000 --settings=plane.settings.local

View File

@@ -3,7 +3,9 @@ import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "plane.settings.production")
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "plane.settings.production"
)
try:
from django.core.management import execute_from_command_line
except ImportError as exc:

4
apiserver/package.json Normal file
View File

@@ -0,0 +1,4 @@
{
"name": "plane-api",
"version": "0.17.0"
}

View File

@@ -0,0 +1,50 @@
# Django imports
from django.utils import timezone
from django.db.models import Q
# Third party imports
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
# Module imports
from plane.db.models import APIToken
class APIKeyAuthentication(authentication.BaseAuthentication):
"""
Authentication with an API Key
"""
www_authenticate_realm = "api"
media_type = "application/json"
auth_header_name = "X-Api-Key"
def get_api_token(self, request):
return request.headers.get(self.auth_header_name)
def validate_api_token(self, token):
try:
api_token = APIToken.objects.get(
Q(
Q(expired_at__gt=timezone.now())
| Q(expired_at__isnull=True)
),
token=token,
is_active=True,
)
except APIToken.DoesNotExist:
raise AuthenticationFailed("Given API token is not valid")
# save api token last used
api_token.last_used = timezone.now()
api_token.save(update_fields=["last_used"])
return (api_token.user, api_token.token)
def authenticate(self, request):
token = self.get_api_token(request=request)
if not token:
return None
# Validate the API token
user, token = self.validate_api_token(token)
return user, token
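A minimal client-side sketch of this authentication scheme, assuming a locally reachable deployment and an issued key (the base URL and key value below are hypothetical):
import requests
BASE_URL = "http://localhost/api/v1"  # hypothetical mount point for these API routes
API_KEY = "plane_api_xxxxxxxx"        # hypothetical key
# The header name matches auth_header_name ("X-Api-Key") above.
response = requests.get(
    f"{BASE_URL}/workspaces/my-workspace/projects/",
    headers={"X-Api-Key": API_KEY},
)
print(response.status_code)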

View File

@@ -0,0 +1,42 @@
from rest_framework.throttling import SimpleRateThrottle
class ApiKeyRateThrottle(SimpleRateThrottle):
scope = "api_key"
rate = "60/minute"
def get_cache_key(self, request, view):
# Retrieve the API key from the request header
api_key = request.headers.get("X-Api-Key")
if not api_key:
return None # Allow the request if there's no API key
# Use the API key as part of the cache key
return f"{self.scope}:{api_key}"
def allow_request(self, request, view):
allowed = super().allow_request(request, view)
if allowed:
now = self.timer()
# Calculate the remaining limit and reset time
history = self.cache.get(self.key, [])
# Remove old histories
while history and history[-1] <= now - self.duration:
history.pop()
# Calculate the requests
num_requests = len(history)
# Check available requests
available = self.num_requests - num_requests
# Unix timestamp for when the rate limit will reset
reset_time = int(now + self.duration)
# Add headers
request.META["X-RateLimit-Remaining"] = max(0, available)
request.META["X-RateLimit-Reset"] = reset_time
return allowed
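A minimal sketch of attaching this throttle to a DRF view and echoing the values it stores on request.META; the view itself is hypothetical and only for illustration:
from rest_framework.response import Response
from rest_framework.views import APIView
class ExampleThrottledEndpoint(APIView):  # hypothetical view, not part of this changeset
    throttle_classes = [ApiKeyRateThrottle]
    def get(self, request):
        # Values written by ApiKeyRateThrottle.allow_request above.
        return Response(
            {
                "remaining": request.META.get("X-RateLimit-Remaining"),
                "reset": request.META.get("X-RateLimit-Reset"),
            }
        )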

View File

@@ -0,0 +1,21 @@
from .user import UserLiteSerializer
from .workspace import WorkspaceLiteSerializer
from .project import ProjectSerializer, ProjectLiteSerializer
from .issue import (
IssueSerializer,
LabelSerializer,
IssueLinkSerializer,
IssueAttachmentSerializer,
IssueCommentSerializer,
IssueActivitySerializer,
IssueExpandSerializer,
)
from .state import StateLiteSerializer, StateSerializer
from .cycle import CycleSerializer, CycleIssueSerializer, CycleLiteSerializer
from .module import (
ModuleSerializer,
ModuleIssueSerializer,
ModuleLiteSerializer,
)
from .inbox import InboxIssueSerializer

View File

@@ -0,0 +1,107 @@
# Third party imports
from rest_framework import serializers
class BaseSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(read_only=True)
def __init__(self, *args, **kwargs):
# If 'fields' is provided in the arguments, remove it and store it separately.
# This is done so as not to pass this custom argument up to the superclass.
fields = kwargs.pop("fields", [])
self.expand = kwargs.pop("expand", []) or []
# Call the initialization of the superclass.
super().__init__(*args, **kwargs)
# If 'fields' was provided, filter the fields of the serializer accordingly.
if fields:
self.fields = self._filter_fields(fields=fields)
def _filter_fields(self, fields):
"""
Adjust the serializer's fields based on the provided 'fields' list.
:param fields: List or dictionary specifying which fields to include in the serializer.
:return: The updated fields for the serializer.
"""
# Check each field_name in the provided fields.
for field_name in fields:
# If the field is a dictionary (indicating nested fields),
# loop through its keys and values.
if isinstance(field_name, dict):
for key, value in field_name.items():
# If the value of this nested field is a list,
# perform a recursive filter on it.
if isinstance(value, list):
self._filter_fields(self.fields[key], value)
# Create a list to store allowed fields.
allowed = []
for item in fields:
# If the item is a string, it directly represents a field's name.
if isinstance(item, str):
allowed.append(item)
# If the item is a dictionary, it represents a nested field.
# Add the key of this dictionary to the allowed list.
elif isinstance(item, dict):
allowed.append(list(item.keys())[0])
# Convert the current serializer's fields and the allowed fields to sets.
existing = set(self.fields)
allowed = set(allowed)
# Remove fields from the serializer that aren't in the 'allowed' list.
for field_name in existing - allowed:
self.fields.pop(field_name)
return self.fields
def to_representation(self, instance):
response = super().to_representation(instance)
# Ensure 'expand' is iterable before processing
if self.expand:
for expand in self.expand:
if expand in self.fields:
# Import all the expandable serializers
from . import (
IssueSerializer,
ProjectLiteSerializer,
StateLiteSerializer,
UserLiteSerializer,
WorkspaceLiteSerializer,
)
# Expansion mapper
expansion = {
"user": UserLiteSerializer,
"workspace": WorkspaceLiteSerializer,
"project": ProjectLiteSerializer,
"default_assignee": UserLiteSerializer,
"project_lead": UserLiteSerializer,
"state": StateLiteSerializer,
"created_by": UserLiteSerializer,
"issue": IssueSerializer,
"actor": UserLiteSerializer,
"owned_by": UserLiteSerializer,
"members": UserLiteSerializer,
}
# Check if field in expansion then expand the field
if expand in expansion:
if isinstance(response.get(expand), list):
exp_serializer = expansion[expand](
getattr(instance, expand), many=True
)
else:
exp_serializer = expansion[expand](
getattr(instance, expand)
)
response[expand] = exp_serializer.data
else:
# You might need to handle this case differently
response[expand] = getattr(
instance, f"{expand}_id", None
)
return response
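A minimal usage sketch of the dynamic fields/expand arguments handled above, assuming an Issue instance named issue is available:
serializer = IssueSerializer(
    issue,
    fields=["id", "name", "state"],  # keep only these fields
    expand=["state"],                # replace the state id with StateLiteSerializer data
)
data = serializer.data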

View File

@@ -0,0 +1,62 @@
# Third party imports
from rest_framework import serializers
# Module imports
from .base import BaseSerializer
from plane.db.models import Cycle, CycleIssue
class CycleSerializer(BaseSerializer):
total_issues = serializers.IntegerField(read_only=True)
cancelled_issues = serializers.IntegerField(read_only=True)
completed_issues = serializers.IntegerField(read_only=True)
started_issues = serializers.IntegerField(read_only=True)
unstarted_issues = serializers.IntegerField(read_only=True)
backlog_issues = serializers.IntegerField(read_only=True)
total_estimates = serializers.IntegerField(read_only=True)
completed_estimates = serializers.IntegerField(read_only=True)
started_estimates = serializers.IntegerField(read_only=True)
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("end_date", None) is not None
and data.get("start_date", None) > data.get("end_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed end date"
)
return data
class Meta:
model = Cycle
fields = "__all__"
read_only_fields = [
"id",
"created_at",
"updated_at",
"created_by",
"updated_by",
"workspace",
"project",
"owned_by",
]
class CycleIssueSerializer(BaseSerializer):
sub_issues_count = serializers.IntegerField(read_only=True)
class Meta:
model = CycleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"cycle",
]
class CycleLiteSerializer(BaseSerializer):
class Meta:
model = Cycle
fields = "__all__"

View File

@@ -0,0 +1,19 @@
# Module imports
from .base import BaseSerializer
from plane.db.models import InboxIssue
class InboxIssueSerializer(BaseSerializer):
class Meta:
model = InboxIssue
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"issue",
"created_by",
"updated_by",
"created_at",
"updated_at",
]

View File

@@ -0,0 +1,429 @@
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
# Django imports
from django.utils import timezone
from lxml import html
# Third party imports
from rest_framework import serializers
# Module imports
from plane.db.models import (
Issue,
IssueActivity,
IssueAssignee,
IssueAttachment,
IssueComment,
IssueLabel,
IssueLink,
Label,
ProjectMember,
State,
User,
)
from .base import BaseSerializer
from .cycle import CycleLiteSerializer, CycleSerializer
from .module import ModuleLiteSerializer, ModuleSerializer
from .state import StateLiteSerializer
from .user import UserLiteSerializer
class IssueSerializer(BaseSerializer):
assignees = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(
queryset=User.objects.values_list("id", flat=True)
),
write_only=True,
required=False,
)
labels = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(
queryset=Label.objects.values_list("id", flat=True)
),
write_only=True,
required=False,
)
class Meta:
model = Issue
read_only_fields = [
"id",
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
exclude = [
"description",
"description_stripped",
]
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("target_date", None) is not None
and data.get("start_date", None) > data.get("target_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed target date"
)
try:
if data.get("description_html", None) is not None:
parsed = html.fromstring(data["description_html"])
parsed_str = html.tostring(parsed, encoding="unicode")
data["description_html"] = parsed_str
except Exception:
raise serializers.ValidationError("Invalid HTML passed")
# Validate assignees are from project
if data.get("assignees", []):
data["assignees"] = ProjectMember.objects.filter(
project_id=self.context.get("project_id"),
is_active=True,
member_id__in=data["assignees"],
).values_list("member_id", flat=True)
# Validate labels are from project
if data.get("labels", []):
data["labels"] = Label.objects.filter(
project_id=self.context.get("project_id"),
id__in=data["labels"],
).values_list("id", flat=True)
# Check state is from the project only else raise validation error
if (
data.get("state")
and not State.objects.filter(
project_id=self.context.get("project_id"),
pk=data.get("state").id,
).exists()
):
raise serializers.ValidationError(
"State is not valid please pass a valid state_id"
)
# Check parent issue is from workspace as it can be cross workspace
if (
data.get("parent")
and not Issue.objects.filter(
workspace_id=self.context.get("workspace_id"),
pk=data.get("parent").id,
).exists()
):
raise serializers.ValidationError(
"Parent is not valid issue_id please pass a valid issue_id"
)
return data
def create(self, validated_data):
assignees = validated_data.pop("assignees", None)
labels = validated_data.pop("labels", None)
project_id = self.context["project_id"]
workspace_id = self.context["workspace_id"]
default_assignee_id = self.context["default_assignee_id"]
issue = Issue.objects.create(**validated_data, project_id=project_id)
# Issue Audit Users
created_by_id = issue.created_by_id
updated_by_id = issue.updated_by_id
if assignees is not None and len(assignees):
IssueAssignee.objects.bulk_create(
[
IssueAssignee(
assignee_id=assignee_id,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for assignee_id in assignees
],
batch_size=10,
)
else:
# Then assign it to default assignee
if default_assignee_id is not None:
IssueAssignee.objects.create(
assignee_id=default_assignee_id,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
if labels is not None and len(labels):
IssueLabel.objects.bulk_create(
[
IssueLabel(
label_id=label_id,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for label_id in labels
],
batch_size=10,
)
return issue
def update(self, instance, validated_data):
assignees = validated_data.pop("assignees", None)
labels = validated_data.pop("labels", None)
# Related models
project_id = instance.project_id
workspace_id = instance.workspace_id
created_by_id = instance.created_by_id
updated_by_id = instance.updated_by_id
if assignees is not None:
IssueAssignee.objects.filter(issue=instance).delete()
IssueAssignee.objects.bulk_create(
[
IssueAssignee(
assignee_id=assignee_id,
issue=instance,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for assignee_id in assignees
],
batch_size=10,
)
if labels is not None:
IssueLabel.objects.filter(issue=instance).delete()
IssueLabel.objects.bulk_create(
[
IssueLabel(
label_id=label_id,
issue=instance,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for label_id in labels
],
batch_size=10,
)
# Update the timestamp even when only related models are updated
instance.updated_at = timezone.now()
return super().update(instance, validated_data)
def to_representation(self, instance):
data = super().to_representation(instance)
if "assignees" in self.fields:
if "assignees" in self.expand:
from .user import UserLiteSerializer
data["assignees"] = UserLiteSerializer(
instance.assignees.all(), many=True
).data
else:
data["assignees"] = [
str(assignee.id) for assignee in instance.assignees.all()
]
if "labels" in self.fields:
if "labels" in self.expand:
data["labels"] = LabelSerializer(
instance.labels.all(), many=True
).data
else:
data["labels"] = [
str(label.id) for label in instance.labels.all()
]
return data
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueLinkSerializer(BaseSerializer):
class Meta:
model = IssueLink
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"issue",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
def validate_url(self, value):
# Check URL format
validate_url = URLValidator()
try:
validate_url(value)
except ValidationError:
raise serializers.ValidationError("Invalid URL format.")
# Check URL scheme
if not value.startswith(("http://", "https://")):
raise serializers.ValidationError("Invalid URL scheme.")
return value
# Validate that the url does not already exist for this issue
def create(self, validated_data):
if IssueLink.objects.filter(
url=validated_data.get("url"),
issue_id=validated_data.get("issue_id"),
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return IssueLink.objects.create(**validated_data)
def update(self, instance, validated_data):
if IssueLink.objects.filter(
url=validated_data.get("url"),
issue_id=instance.issue_id,
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return super().update(instance, validated_data)
class IssueAttachmentSerializer(BaseSerializer):
class Meta:
model = IssueAttachment
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"issue",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueCommentSerializer(BaseSerializer):
is_member = serializers.BooleanField(read_only=True)
class Meta:
model = IssueComment
read_only_fields = [
"id",
"workspace",
"project",
"issue",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
exclude = [
"comment_stripped",
"comment_json",
]
def validate(self, data):
try:
if data.get("comment_html", None) is not None:
parsed = html.fromstring(data["comment_html"])
parsed_str = html.tostring(parsed, encoding="unicode")
data["comment_html"] = parsed_str
except Exception:
raise serializers.ValidationError("Invalid HTML passed")
return data
class IssueActivitySerializer(BaseSerializer):
class Meta:
model = IssueActivity
exclude = [
"created_by",
"updated_by",
]
class CycleIssueSerializer(BaseSerializer):
cycle = CycleSerializer(read_only=True)
class Meta:
fields = [
"cycle",
]
class ModuleIssueSerializer(BaseSerializer):
module = ModuleSerializer(read_only=True)
class Meta:
fields = [
"module",
]
class LabelLiteSerializer(BaseSerializer):
class Meta:
model = Label
fields = [
"id",
"name",
"color",
]
class IssueExpandSerializer(BaseSerializer):
cycle = CycleLiteSerializer(source="issue_cycle.cycle", read_only=True)
module = ModuleLiteSerializer(source="issue_module.module", read_only=True)
labels = LabelLiteSerializer(read_only=True, many=True)
assignees = UserLiteSerializer(read_only=True, many=True)
state = StateLiteSerializer(read_only=True)
class Meta:
model = Issue
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]

View File

@@ -0,0 +1,163 @@
# Third party imports
from rest_framework import serializers
# Module imports
from .base import BaseSerializer
from plane.db.models import (
User,
Module,
ModuleLink,
ModuleMember,
ModuleIssue,
ProjectMember,
)
class ModuleSerializer(BaseSerializer):
members = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(
queryset=User.objects.values_list("id", flat=True)
),
write_only=True,
required=False,
)
total_issues = serializers.IntegerField(read_only=True)
cancelled_issues = serializers.IntegerField(read_only=True)
completed_issues = serializers.IntegerField(read_only=True)
started_issues = serializers.IntegerField(read_only=True)
unstarted_issues = serializers.IntegerField(read_only=True)
backlog_issues = serializers.IntegerField(read_only=True)
class Meta:
model = Module
fields = "__all__"
read_only_fields = [
"id",
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
def to_representation(self, instance):
data = super().to_representation(instance)
data["members"] = [str(member.id) for member in instance.members.all()]
return data
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("target_date", None) is not None
and data.get("start_date", None) > data.get("target_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed target date"
)
if data.get("members", []):
data["members"] = ProjectMember.objects.filter(
project_id=self.context.get("project_id"),
member_id__in=data["members"],
).values_list("member_id", flat=True)
return data
def create(self, validated_data):
members = validated_data.pop("members", None)
project_id = self.context["project_id"]
workspace_id = self.context["workspace_id"]
module = Module.objects.create(**validated_data, project_id=project_id)
if members is not None:
ModuleMember.objects.bulk_create(
[
ModuleMember(
module=module,
member_id=str(member),
project_id=project_id,
workspace_id=workspace_id,
created_by=module.created_by,
updated_by=module.updated_by,
)
for member in members
],
batch_size=10,
ignore_conflicts=True,
)
return module
def update(self, instance, validated_data):
members = validated_data.pop("members", None)
if members is not None:
ModuleMember.objects.filter(module=instance).delete()
ModuleMember.objects.bulk_create(
[
ModuleMember(
module=instance,
member_id=str(member),
project=instance.project,
workspace=instance.project.workspace,
created_by=instance.created_by,
updated_by=instance.updated_by,
)
for member in members
],
batch_size=10,
ignore_conflicts=True,
)
return super().update(instance, validated_data)
class ModuleIssueSerializer(BaseSerializer):
sub_issues_count = serializers.IntegerField(read_only=True)
class Meta:
model = ModuleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"module",
]
class ModuleLinkSerializer(BaseSerializer):
class Meta:
model = ModuleLink
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"module",
]
# Validate that the url does not already exist for this module
def create(self, validated_data):
if ModuleLink.objects.filter(
url=validated_data.get("url"),
module_id=validated_data.get("module_id"),
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return ModuleLink.objects.create(**validated_data)
class ModuleLiteSerializer(BaseSerializer):
class Meta:
model = Module
fields = "__all__"

View File

@@ -0,0 +1,100 @@
# Third party imports
from rest_framework import serializers
# Module imports
from plane.db.models import (
Project,
ProjectIdentifier,
WorkspaceMember,
)
from .base import BaseSerializer
class ProjectSerializer(BaseSerializer):
total_members = serializers.IntegerField(read_only=True)
total_cycles = serializers.IntegerField(read_only=True)
total_modules = serializers.IntegerField(read_only=True)
is_member = serializers.BooleanField(read_only=True)
sort_order = serializers.FloatField(read_only=True)
member_role = serializers.IntegerField(read_only=True)
is_deployed = serializers.BooleanField(read_only=True)
class Meta:
model = Project
fields = "__all__"
read_only_fields = [
"id",
"emoji",
"workspace",
"created_at",
"updated_at",
"created_by",
"updated_by",
]
def validate(self, data):
# Check project lead should be a member of the workspace
if (
data.get("project_lead", None) is not None
and not WorkspaceMember.objects.filter(
workspace_id=self.context["workspace_id"],
member_id=data.get("project_lead"),
).exists()
):
raise serializers.ValidationError(
"Project lead should be a user in the workspace"
)
# Check default assignee should be a member of the workspace
if (
data.get("default_assignee", None) is not None
and not WorkspaceMember.objects.filter(
workspace_id=self.context["workspace_id"],
member_id=data.get("default_assignee"),
).exists()
):
raise serializers.ValidationError(
"Default assignee should be a user in the workspace"
)
return data
def create(self, validated_data):
identifier = validated_data.get("identifier", "").strip().upper()
if identifier == "":
raise serializers.ValidationError(
detail="Project Identifier is required"
)
if ProjectIdentifier.objects.filter(
name=identifier, workspace_id=self.context["workspace_id"]
).exists():
raise serializers.ValidationError(
detail="Project Identifier is taken"
)
project = Project.objects.create(
**validated_data, workspace_id=self.context["workspace_id"]
)
_ = ProjectIdentifier.objects.create(
name=project.identifier,
project=project,
workspace_id=self.context["workspace_id"],
)
return project
class ProjectLiteSerializer(BaseSerializer):
class Meta:
model = Project
fields = [
"id",
"identifier",
"name",
"cover_image",
"icon_prop",
"emoji",
"description",
]
read_only_fields = fields

View File

@@ -0,0 +1,38 @@
# Module imports
from .base import BaseSerializer
from plane.db.models import State
class StateSerializer(BaseSerializer):
def validate(self, data):
# If the default is being provided then make all other states default False
if data.get("default", False):
State.objects.filter(
project_id=self.context.get("project_id")
).update(default=False)
return data
class Meta:
model = State
fields = "__all__"
read_only_fields = [
"id",
"created_by",
"updated_by",
"created_at",
"updated_at",
"workspace",
"project",
]
class StateLiteSerializer(BaseSerializer):
class Meta:
model = State
fields = [
"id",
"name",
"color",
"group",
]
read_only_fields = fields

View File

@@ -0,0 +1,19 @@
# Module imports
from plane.db.models import User
from .base import BaseSerializer
class UserLiteSerializer(BaseSerializer):
class Meta:
model = User
fields = [
"id",
"first_name",
"last_name",
"email",
"avatar",
"display_name",
"email",
]
read_only_fields = fields

View File

@@ -0,0 +1,16 @@
# Module imports
from plane.db.models import Workspace
from .base import BaseSerializer
class WorkspaceLiteSerializer(BaseSerializer):
"""Lite serializer with only required fields"""
class Meta:
model = Workspace
fields = [
"name",
"slug",
"id",
]
read_only_fields = fields

View File

@@ -0,0 +1,15 @@
from .project import urlpatterns as project_patterns
from .state import urlpatterns as state_patterns
from .issue import urlpatterns as issue_patterns
from .cycle import urlpatterns as cycle_patterns
from .module import urlpatterns as module_patterns
from .inbox import urlpatterns as inbox_patterns
urlpatterns = [
*project_patterns,
*state_patterns,
*issue_patterns,
*cycle_patterns,
*module_patterns,
*inbox_patterns,
]

View File

@@ -0,0 +1,17 @@
from django.urls import path
from plane.api.views import InboxIssueAPIEndpoint
urlpatterns = [
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/inbox-issues/",
InboxIssueAPIEndpoint.as_view(),
name="inbox-issue",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/inbox-issues/<uuid:issue_id>/",
InboxIssueAPIEndpoint.as_view(),
name="inbox-issue",
),
]

View File

@@ -0,0 +1,62 @@
from django.urls import path
from plane.api.views import (
IssueAPIEndpoint,
LabelAPIEndpoint,
IssueLinkAPIEndpoint,
IssueCommentAPIEndpoint,
IssueActivityAPIEndpoint,
)
urlpatterns = [
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/",
IssueAPIEndpoint.as_view(),
name="issue",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:pk>/",
IssueAPIEndpoint.as_view(),
name="issue",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/labels/",
LabelAPIEndpoint.as_view(),
name="label",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/labels/<uuid:pk>/",
LabelAPIEndpoint.as_view(),
name="label",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/links/",
IssueLinkAPIEndpoint.as_view(),
name="link",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/links/<uuid:pk>/",
IssueLinkAPIEndpoint.as_view(),
name="link",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/comments/",
IssueCommentAPIEndpoint.as_view(),
name="comment",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/comments/<uuid:pk>/",
IssueCommentAPIEndpoint.as_view(),
name="comment",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/activities/",
IssueActivityAPIEndpoint.as_view(),
name="activity",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/issues/<uuid:issue_id>/activities/<uuid:pk>/",
IssueActivityAPIEndpoint.as_view(),
name="activity",
),
]

View File

@@ -0,0 +1,24 @@
from django.urls import path
from plane.api.views import (
ProjectAPIEndpoint,
ProjectArchiveUnarchiveAPIEndpoint,
)
urlpatterns = [
path(
"workspaces/<str:slug>/projects/",
ProjectAPIEndpoint.as_view(),
name="project",
),
path(
"workspaces/<str:slug>/projects/<uuid:pk>/",
ProjectAPIEndpoint.as_view(),
name="project",
),
path(
"workspaces/<str:slug>/projects/<uuid:project_id>/archive/",
ProjectArchiveUnarchiveAPIEndpoint.as_view(),
name="project-archive-unarchive",
),
]

View File

@@ -0,0 +1,26 @@
from .project import ProjectAPIEndpoint, ProjectArchiveUnarchiveAPIEndpoint
from .state import StateAPIEndpoint
from .issue import (
IssueAPIEndpoint,
LabelAPIEndpoint,
IssueLinkAPIEndpoint,
IssueCommentAPIEndpoint,
IssueActivityAPIEndpoint,
)
from .cycle import (
CycleAPIEndpoint,
CycleIssueAPIEndpoint,
TransferCycleIssueAPIEndpoint,
CycleArchiveUnarchiveAPIEndpoint,
)
from .module import (
ModuleAPIEndpoint,
ModuleIssueAPIEndpoint,
ModuleArchiveUnarchiveAPIEndpoint,
)
from .inbox import InboxIssueAPIEndpoint

View File

@@ -0,0 +1,192 @@
# Python imports
from urllib.parse import urlparse
import zoneinfo
# Django imports
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import IntegrityError
from django.urls import resolve
from django.utils import timezone
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# Third party imports
from rest_framework.views import APIView
# Module imports
from plane.api.middleware.api_authentication import APIKeyAuthentication
from plane.api.rate_limit import ApiKeyRateThrottle
from plane.bgtasks.webhook_task import send_webhook
from plane.utils.exception_logger import log_exception
from plane.utils.paginator import BasePaginator
class TimezoneMixin:
"""
This enables timezone conversion according
to the timezone set by the user
"""
def initial(self, request, *args, **kwargs):
super().initial(request, *args, **kwargs)
if request.user.is_authenticated:
timezone.activate(zoneinfo.ZoneInfo(request.user.user_timezone))
else:
timezone.deactivate()
class WebhookMixin:
webhook_event = None
bulk = False
def finalize_response(self, request, response, *args, **kwargs):
response = super().finalize_response(
request, response, *args, **kwargs
)
# Check whether a webhook should be sent for this request
if (
self.webhook_event
and self.request.method in ["POST", "PATCH", "DELETE"]
and response.status_code in [200, 201, 204]
):
url = request.build_absolute_uri()
parsed_url = urlparse(url)
# Extract the scheme and netloc
scheme = parsed_url.scheme
netloc = parsed_url.netloc
# Push the object to delay
send_webhook.delay(
event=self.webhook_event,
payload=response.data,
kw=self.kwargs,
action=self.request.method,
slug=self.workspace_slug,
bulk=self.bulk,
current_site=f"{scheme}://{netloc}",
)
return response
class BaseAPIView(TimezoneMixin, APIView, BasePaginator):
authentication_classes = [
APIKeyAuthentication,
]
permission_classes = [
IsAuthenticated,
]
throttle_classes = [
ApiKeyRateThrottle,
]
def filter_queryset(self, queryset):
for backend in list(self.filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
def handle_exception(self, exc):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
try:
response = super().handle_exception(exc)
return response
except Exception as e:
if isinstance(e, IntegrityError):
return Response(
{"error": "The payload is not valid"},
status=status.HTTP_400_BAD_REQUEST,
)
if isinstance(e, ValidationError):
return Response(
{"error": "Please provide valid detail"},
status=status.HTTP_400_BAD_REQUEST,
)
if isinstance(e, ObjectDoesNotExist):
return Response(
{"error": "The requested resource does not exist."},
status=status.HTTP_404_NOT_FOUND,
)
if isinstance(e, KeyError):
return Response(
{"error": "The required key does not exist."},
status=status.HTTP_400_BAD_REQUEST,
)
log_exception(e)
return Response(
{"error": "Something went wrong please try again later"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
def dispatch(self, request, *args, **kwargs):
try:
response = super().dispatch(request, *args, **kwargs)
if settings.DEBUG:
from django.db import connection
print(
f"{request.method} - {request.get_full_path()} of Queries: {len(connection.queries)}"
)
return response
except Exception as exc:
response = self.handle_exception(exc)
return response
def finalize_response(self, request, response, *args, **kwargs):
# Call super to get the default response
response = super().finalize_response(
request, response, *args, **kwargs
)
# Add custom headers if they exist in the request META
ratelimit_remaining = request.META.get("X-RateLimit-Remaining")
if ratelimit_remaining is not None:
response["X-RateLimit-Remaining"] = ratelimit_remaining
ratelimit_reset = request.META.get("X-RateLimit-Reset")
if ratelimit_reset is not None:
response["X-RateLimit-Reset"] = ratelimit_reset
return response
@property
def workspace_slug(self):
return self.kwargs.get("slug", None)
@property
def project_id(self):
project_id = self.kwargs.get("project_id", None)
if project_id:
return project_id
if resolve(self.request.path_info).url_name == "project":
return self.kwargs.get("pk", None)
@property
def fields(self):
fields = [
field
for field in self.request.GET.get("fields", "").split(",")
if field
]
return fields if fields else None
@property
def expand(self):
expand = [
expand
for expand in self.request.GET.get("expand", "").split(",")
if expand
]
return expand if expand else None
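`fields` and `expand` above just split comma-separated query parameters; each concrete endpoint passes them straight into its serializer and hands the queryset to `self.paginate`. A minimal sketch of an endpoint following that pattern (the wiring is illustrative, not part of this diff; Label and LabelSerializer are reused only as convenient stand-ins):
# Hypothetical endpoint built on BaseAPIView: it inherits the API-key
# authentication, throttling, exception handling, and the fields/expand
# helpers, and only adds a queryset and a GET handler.
from plane.api.serializers import LabelSerializer
from plane.api.views.base import BaseAPIView  # the class defined above
from plane.db.models import Label

class ExampleLabelListAPIEndpoint(BaseAPIView):
    def get_queryset(self):
        return Label.objects.filter(
            workspace__slug=self.kwargs.get("slug"),
            project_id=self.kwargs.get("project_id"),
        )

    def get(self, request, slug, project_id):
        # A request like ?fields=id,name&expand=project is parsed by the
        # properties above and forwarded to the serializer.
        return self.paginate(
            request=request,
            queryset=self.get_queryset(),
            on_results=lambda labels: LabelSerializer(
                labels, many=True, fields=self.fields, expand=self.expand
            ).data,
        )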

File diff suppressed because it is too large

View File

@@ -0,0 +1,376 @@
# Python imports
import json
# Django imports
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Q
from django.utils import timezone
# Third party imports
from rest_framework import status
from rest_framework.response import Response
# Module imports
from plane.api.serializers import InboxIssueSerializer, IssueSerializer
from plane.app.permissions import ProjectLitePermission
from plane.bgtasks.issue_activites_task import issue_activity
from plane.db.models import (
Inbox,
InboxIssue,
Issue,
Project,
ProjectMember,
State,
)
from .base import BaseAPIView
class InboxIssueAPIEndpoint(BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to inbox issues.
"""
permission_classes = [
ProjectLitePermission,
]
serializer_class = InboxIssueSerializer
model = InboxIssue
filterset_fields = [
"status",
]
def get_queryset(self):
inbox = Inbox.objects.filter(
workspace__slug=self.kwargs.get("slug"),
project_id=self.kwargs.get("project_id"),
).first()
project = Project.objects.get(
workspace__slug=self.kwargs.get("slug"),
pk=self.kwargs.get("project_id"),
)
if inbox is None and not project.inbox_view:
return InboxIssue.objects.none()
return (
InboxIssue.objects.filter(
Q(snoozed_till__gte=timezone.now())
| Q(snoozed_till__isnull=True),
workspace__slug=self.kwargs.get("slug"),
project_id=self.kwargs.get("project_id"),
inbox_id=inbox.id,
)
.select_related("issue", "workspace", "project")
.order_by(self.kwargs.get("order_by", "-created_at"))
)
def get(self, request, slug, project_id, issue_id=None):
if issue_id:
inbox_issue_queryset = self.get_queryset().get(issue_id=issue_id)
inbox_issue_data = InboxIssueSerializer(
inbox_issue_queryset,
fields=self.fields,
expand=self.expand,
).data
return Response(
inbox_issue_data,
status=status.HTTP_200_OK,
)
issue_queryset = self.get_queryset()
return self.paginate(
request=request,
queryset=(issue_queryset),
on_results=lambda inbox_issues: InboxIssueSerializer(
inbox_issues,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def post(self, request, slug, project_id):
if not request.data.get("issue", {}).get("name", False):
return Response(
{"error": "Name is required"},
status=status.HTTP_400_BAD_REQUEST,
)
inbox = Inbox.objects.filter(
workspace__slug=slug, project_id=project_id
).first()
project = Project.objects.get(
workspace__slug=slug,
pk=project_id,
)
# Inbox view
if inbox is None and not project.inbox_view:
return Response(
{
"error": "Inbox is not enabled for this project enable it through the project's api"
},
status=status.HTTP_400_BAD_REQUEST,
)
# Check for valid priority
if request.data.get("issue", {}).get("priority", "none") not in [
"low",
"medium",
"high",
"urgent",
"none",
]:
return Response(
{"error": "Invalid priority"},
status=status.HTTP_400_BAD_REQUEST,
)
# Create or get state
state, _ = State.objects.get_or_create(
name="Triage",
group="triage",
description="Default state for managing all Inbox Issues",
project_id=project_id,
color="#ff7700",
is_triage=True,
)
# create an issue
issue = Issue.objects.create(
name=request.data.get("issue", {}).get("name"),
description=request.data.get("issue", {}).get("description", {}),
description_html=request.data.get("issue", {}).get(
"description_html", "<p></p>"
),
priority=request.data.get("issue", {}).get("priority", "low"),
project_id=project_id,
state=state,
)
# Create an Issue Activity
issue_activity.delay(
type="issue.activity.created",
requested_data=json.dumps(request.data, cls=DjangoJSONEncoder),
actor_id=str(request.user.id),
issue_id=str(issue.id),
project_id=str(project_id),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
# create an inbox issue
inbox_issue = InboxIssue.objects.create(
inbox_id=inbox.id,
project_id=project_id,
issue=issue,
source=request.data.get("source", "in-app"),
)
serializer = InboxIssueSerializer(inbox_issue)
return Response(serializer.data, status=status.HTTP_200_OK)
def patch(self, request, slug, project_id, issue_id):
inbox = Inbox.objects.filter(
workspace__slug=slug, project_id=project_id
).first()
project = Project.objects.get(
workspace__slug=slug,
pk=project_id,
)
# Inbox view
if inbox is None and not project.inbox_view:
return Response(
{
"error": "Inbox is not enabled for this project enable it through the project's api"
},
status=status.HTTP_400_BAD_REQUEST,
)
# Get the inbox issue
inbox_issue = InboxIssue.objects.get(
issue_id=issue_id,
workspace__slug=slug,
project_id=project_id,
inbox_id=inbox.id,
)
# Get the project member
project_member = ProjectMember.objects.get(
workspace__slug=slug,
project_id=project_id,
member=request.user,
is_active=True,
)
# Admins and members can edit any inbox issue; guests can only edit the ones they created
if project_member.role <= 10 and str(inbox_issue.created_by_id) != str(
request.user.id
):
return Response(
{"error": "You cannot edit inbox issues"},
status=status.HTTP_400_BAD_REQUEST,
)
# Get issue data
issue_data = request.data.pop("issue", False)
if bool(issue_data):
issue = Issue.objects.get(
pk=issue_id, workspace__slug=slug, project_id=project_id
)
# Only allow guests and viewers to edit name and description
if project_member.role <= 10:
# Restrict guests and viewers to the name and description fields
issue_data = {
"name": issue_data.get("name", issue.name),
"description_html": issue_data.get(
"description_html", issue.description_html
),
"description": issue_data.get(
"description", issue.description
),
}
issue_serializer = IssueSerializer(
issue, data=issue_data, partial=True
)
if issue_serializer.is_valid():
current_instance = issue
# Log all the updates
requested_data = json.dumps(issue_data, cls=DjangoJSONEncoder)
if issue is not None:
issue_activity.delay(
type="issue.activity.updated",
requested_data=requested_data,
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=json.dumps(
IssueSerializer(current_instance).data,
cls=DjangoJSONEncoder,
),
epoch=int(timezone.now().timestamp()),
)
issue_serializer.save()
else:
return Response(
issue_serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
# Only project admins and members can edit inbox issue attributes
if project_member.role > 10:
serializer = InboxIssueSerializer(
inbox_issue, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save()
# Update the issue state if the issue is rejected or marked as duplicate
if serializer.data["status"] in [-1, 2]:
issue = Issue.objects.get(
pk=issue_id,
workspace__slug=slug,
project_id=project_id,
)
state = State.objects.filter(
group="cancelled",
workspace__slug=slug,
project_id=project_id,
).first()
if state is not None:
issue.state = state
issue.save()
# Update the issue state if it is accepted
if serializer.data["status"] in [1]:
issue = Issue.objects.get(
pk=issue_id,
workspace__slug=slug,
project_id=project_id,
)
# Update the issue state only if it is in triage state
if issue.state.is_triage:
# Move to default state
state = State.objects.filter(
workspace__slug=slug,
project_id=project_id,
default=True,
).first()
if state is not None:
issue.state = state
issue.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
else:
return Response(
InboxIssueSerializer(inbox_issue).data,
status=status.HTTP_200_OK,
)
def delete(self, request, slug, project_id, issue_id):
inbox = Inbox.objects.filter(
workspace__slug=slug, project_id=project_id
).first()
project = Project.objects.get(
workspace__slug=slug,
pk=project_id,
)
# Inbox view
if inbox is None and not project.inbox_view:
return Response(
{
"error": "Inbox is not enabled for this project enable it through the project's api"
},
status=status.HTTP_400_BAD_REQUEST,
)
# Get the inbox issue
inbox_issue = InboxIssue.objects.get(
issue_id=issue_id,
workspace__slug=slug,
project_id=project_id,
inbox_id=inbox.id,
)
# Get the project member
project_member = ProjectMember.objects.get(
workspace__slug=slug,
project_id=project_id,
member=request.user,
is_active=True,
)
# Guests can only delete inbox issues they created
if project_member.role <= 10 and str(inbox_issue.created_by_id) != str(
request.user.id
):
return Response(
{"error": "You cannot delete inbox issue"},
status=status.HTTP_400_BAD_REQUEST,
)
# Check the issue status
if inbox_issue.status in [-2, -1, 0, 2]:
# Delete the issue also
Issue.objects.filter(
workspace__slug=slug, project_id=project_id, pk=issue_id
).delete()
inbox_issue.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
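The POST handler above expects the new issue nested under an `issue` key and only requires `issue.name`; priority must be one of the listed values and `source` defaults to "in-app". A request sketch, with the base URL, header name, and IDs as assumptions:
# Hypothetical request: file an issue into a project's inbox.
import requests

resp = requests.post(
    "https://plane.example.com/api/v1/workspaces/my-workspace/projects/"
    "<project-id>/inbox-issues/",
    headers={"X-API-Key": "<your-api-key>"},
    json={
        "issue": {
            "name": "Login button unresponsive",
            "description_html": "<p>Steps to reproduce ...</p>",
            "priority": "high",
        },
        "source": "in-app",
    },
    timeout=10,
)
print(resp.status_code, resp.json())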

View File

@@ -0,0 +1,815 @@
# Python imports
import json
from django.core.serializers.json import DjangoJSONEncoder
# Django imports
from django.db import IntegrityError
from django.db.models import (
Case,
CharField,
Exists,
F,
Func,
Max,
OuterRef,
Q,
Value,
When,
)
from django.utils import timezone
# Third party imports
from rest_framework import status
from rest_framework.response import Response
# Module imports
from plane.api.serializers import (
IssueActivitySerializer,
IssueCommentSerializer,
IssueLinkSerializer,
IssueSerializer,
LabelSerializer,
)
from plane.app.permissions import (
ProjectEntityPermission,
ProjectLitePermission,
ProjectMemberPermission,
)
from plane.bgtasks.issue_activites_task import issue_activity
from plane.db.models import (
Issue,
IssueActivity,
IssueAttachment,
IssueComment,
IssueLink,
Label,
Project,
ProjectMember,
)
from .base import BaseAPIView, WebhookMixin
class IssueAPIEndpoint(WebhookMixin, BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to issue.
"""
model = Issue
webhook_event = "issue"
permission_classes = [
ProjectEntityPermission,
]
serializer_class = IssueSerializer
def get_queryset(self):
return (
Issue.issue_objects.annotate(
sub_issues_count=Issue.issue_objects.filter(
parent=OuterRef("id")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.filter(project_id=self.kwargs.get("project_id"))
.filter(workspace__slug=self.kwargs.get("slug"))
.select_related("project")
.select_related("workspace")
.select_related("state")
.select_related("parent")
.prefetch_related("assignees")
.prefetch_related("labels")
.order_by(self.kwargs.get("order_by", "-created_at"))
).distinct()
def get(self, request, slug, project_id, pk=None):
if pk:
issue = Issue.issue_objects.annotate(
sub_issues_count=Issue.issue_objects.filter(
parent=OuterRef("id")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
).get(workspace__slug=slug, project_id=project_id, pk=pk)
return Response(
IssueSerializer(
issue,
fields=self.fields,
expand=self.expand,
).data,
status=status.HTTP_200_OK,
)
# Custom ordering for priority and state
priority_order = ["urgent", "high", "medium", "low", "none"]
state_order = [
"backlog",
"unstarted",
"started",
"completed",
"cancelled",
]
order_by_param = request.GET.get("order_by", "-created_at")
issue_queryset = (
self.get_queryset()
.annotate(cycle_id=F("issue_cycle__cycle_id"))
.annotate(module_id=F("issue_module__module_id"))
.annotate(
link_count=IssueLink.objects.filter(issue=OuterRef("id"))
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(
attachment_count=IssueAttachment.objects.filter(
issue=OuterRef("id")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
)
# Priority Ordering
if order_by_param == "priority" or order_by_param == "-priority":
priority_order = (
priority_order
if order_by_param == "priority"
else priority_order[::-1]
)
issue_queryset = issue_queryset.annotate(
priority_order=Case(
*[
When(priority=p, then=Value(i))
for i, p in enumerate(priority_order)
],
output_field=CharField(),
)
).order_by("priority_order")
# State Ordering
elif order_by_param in [
"state__name",
"state__group",
"-state__name",
"-state__group",
]:
state_order = (
state_order
if order_by_param in ["state__name", "state__group"]
else state_order[::-1]
)
issue_queryset = issue_queryset.annotate(
state_order=Case(
*[
When(state__group=state_group, then=Value(i))
for i, state_group in enumerate(state_order)
],
default=Value(len(state_order)),
output_field=CharField(),
)
).order_by("state_order")
# assignee and label ordering
elif order_by_param in [
"labels__name",
"-labels__name",
"assignees__first_name",
"-assignees__first_name",
]:
issue_queryset = issue_queryset.annotate(
max_values=Max(
order_by_param[1::]
if order_by_param.startswith("-")
else order_by_param
)
).order_by(
"-max_values"
if order_by_param.startswith("-")
else "max_values"
)
else:
issue_queryset = issue_queryset.order_by(order_by_param)
return self.paginate(
request=request,
queryset=(issue_queryset),
on_results=lambda issues: IssueSerializer(
issues,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def post(self, request, slug, project_id):
project = Project.objects.get(pk=project_id)
serializer = IssueSerializer(
data=request.data,
context={
"project_id": project_id,
"workspace_id": project.workspace_id,
"default_assignee_id": project.default_assignee_id,
},
)
if serializer.is_valid():
if (
request.data.get("external_id")
and request.data.get("external_source")
and Issue.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).exists()
):
issue = Issue.objects.filter(
workspace__slug=slug,
project_id=project_id,
external_id=request.data.get("external_id"),
external_source=request.data.get("external_source"),
).first()
return Response(
{
"error": "Issue with the same external id and external source already exists",
"id": str(issue.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
# Track the issue
issue_activity.delay(
type="issue.activity.created",
requested_data=json.dumps(
self.request.data, cls=DjangoJSONEncoder
),
actor_id=str(request.user.id),
issue_id=str(serializer.data.get("id", None)),
project_id=str(project_id),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def patch(self, request, slug, project_id, pk=None):
issue = Issue.objects.get(
workspace__slug=slug, project_id=project_id, pk=pk
)
project = Project.objects.get(pk=project_id)
current_instance = json.dumps(
IssueSerializer(issue).data, cls=DjangoJSONEncoder
)
requested_data = json.dumps(self.request.data, cls=DjangoJSONEncoder)
serializer = IssueSerializer(
issue,
data=request.data,
context={
"project_id": project_id,
"workspace_id": project.workspace_id,
},
partial=True,
)
if serializer.is_valid():
if (
str(request.data.get("external_id"))
and (issue.external_id != str(request.data.get("external_id")))
and Issue.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get(
"external_source", issue.external_source
),
external_id=request.data.get("external_id"),
).exists()
):
return Response(
{
"error": "Issue with the same external id and external source already exists",
"id": str(issue.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
issue_activity.delay(
type="issue.activity.updated",
requested_data=requested_data,
actor_id=str(request.user.id),
issue_id=str(pk),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, slug, project_id, pk=None):
issue = Issue.objects.get(
workspace__slug=slug, project_id=project_id, pk=pk
)
current_instance = json.dumps(
IssueSerializer(issue).data, cls=DjangoJSONEncoder
)
issue.delete()
issue_activity.delay(
type="issue.activity.deleted",
requested_data=json.dumps({"issue_id": str(pk)}),
actor_id=str(request.user.id),
issue_id=str(pk),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
return Response(status=status.HTTP_204_NO_CONTENT)
class LabelAPIEndpoint(BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to the labels.
"""
serializer_class = LabelSerializer
model = Label
permission_classes = [
ProjectMemberPermission,
]
def get_queryset(self):
return (
Label.objects.filter(workspace__slug=self.kwargs.get("slug"))
.filter(project_id=self.kwargs.get("project_id"))
.filter(
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(project__archived_at__isnull=True)
.select_related("project")
.select_related("workspace")
.select_related("parent")
.distinct()
.order_by(self.kwargs.get("order_by", "-created_at"))
)
def post(self, request, slug, project_id):
try:
serializer = LabelSerializer(data=request.data)
if serializer.is_valid():
if (
request.data.get("external_id")
and request.data.get("external_source")
and Label.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).exists()
):
label = Label.objects.filter(
workspace__slug=slug,
project_id=project_id,
external_id=request.data.get("external_id"),
external_source=request.data.get("external_source"),
).first()
return Response(
{
"error": "Label with the same external id and external source already exists",
"id": str(label.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save(project_id=project_id)
return Response(
serializer.data, status=status.HTTP_201_CREATED
)
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
except IntegrityError:
label = Label.objects.filter(
workspace__slug=slug,
project_id=project_id,
name=request.data.get("name"),
).first()
return Response(
{
"error": "Label with the same name already exists in the project",
"id": str(label.id),
},
status=status.HTTP_409_CONFLICT,
)
def get(self, request, slug, project_id, pk=None):
if pk is None:
return self.paginate(
request=request,
queryset=(self.get_queryset()),
on_results=lambda labels: LabelSerializer(
labels,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
label = self.get_queryset().get(pk=pk)
serializer = LabelSerializer(
label,
fields=self.fields,
expand=self.expand,
)
return Response(serializer.data, status=status.HTTP_200_OK)
def patch(self, request, slug, project_id, pk=None):
label = self.get_queryset().get(pk=pk)
serializer = LabelSerializer(label, data=request.data, partial=True)
if serializer.is_valid():
if (
str(request.data.get("external_id"))
and (label.external_id != str(request.data.get("external_id")))
and Label.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get(
"external_source", label.external_source
),
external_id=request.data.get("external_id"),
).exists()
):
return Response(
{
"error": "Label with the same external id and external source already exists",
"id": str(label.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, slug, project_id, pk=None):
label = self.get_queryset().get(pk=pk)
label.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class IssueLinkAPIEndpoint(BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to the links of the particular issue.
"""
permission_classes = [
ProjectEntityPermission,
]
model = IssueLink
serializer_class = IssueLinkSerializer
def get_queryset(self):
return (
IssueLink.objects.filter(workspace__slug=self.kwargs.get("slug"))
.filter(project_id=self.kwargs.get("project_id"))
.filter(issue_id=self.kwargs.get("issue_id"))
.filter(
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(project__archived_at__isnull=True)
.order_by(self.kwargs.get("order_by", "-created_at"))
.distinct()
)
def get(self, request, slug, project_id, issue_id, pk=None):
if pk is None:
issue_links = self.get_queryset()
serializer = IssueLinkSerializer(
issue_links,
fields=self.fields,
expand=self.expand,
)
return self.paginate(
request=request,
queryset=(self.get_queryset()),
on_results=lambda issue_links: IssueLinkSerializer(
issue_links,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
issue_link = self.get_queryset().get(pk=pk)
serializer = IssueLinkSerializer(
issue_link,
fields=self.fields,
expand=self.expand,
)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, slug, project_id, issue_id):
serializer = IssueLinkSerializer(data=request.data)
if serializer.is_valid():
serializer.save(
project_id=project_id,
issue_id=issue_id,
)
issue_activity.delay(
type="link.activity.created",
requested_data=json.dumps(
serializer.data, cls=DjangoJSONEncoder
),
actor_id=str(self.request.user.id),
issue_id=str(self.kwargs.get("issue_id")),
project_id=str(self.kwargs.get("project_id")),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def patch(self, request, slug, project_id, issue_id, pk):
issue_link = IssueLink.objects.get(
workspace__slug=slug,
project_id=project_id,
issue_id=issue_id,
pk=pk,
)
requested_data = json.dumps(request.data, cls=DjangoJSONEncoder)
current_instance = json.dumps(
IssueLinkSerializer(issue_link).data,
cls=DjangoJSONEncoder,
)
serializer = IssueLinkSerializer(
issue_link, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save()
issue_activity.delay(
type="link.activity.updated",
requested_data=requested_data,
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, slug, project_id, issue_id, pk):
issue_link = IssueLink.objects.get(
workspace__slug=slug,
project_id=project_id,
issue_id=issue_id,
pk=pk,
)
current_instance = json.dumps(
IssueLinkSerializer(issue_link).data,
cls=DjangoJSONEncoder,
)
issue_activity.delay(
type="link.activity.deleted",
requested_data=json.dumps({"link_id": str(pk)}),
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
issue_link.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class IssueCommentAPIEndpoint(WebhookMixin, BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to comments of the particular issue.
"""
serializer_class = IssueCommentSerializer
model = IssueComment
webhook_event = "issue_comment"
permission_classes = [
ProjectLitePermission,
]
def get_queryset(self):
return (
IssueComment.objects.filter(
workspace__slug=self.kwargs.get("slug")
)
.filter(project_id=self.kwargs.get("project_id"))
.filter(issue_id=self.kwargs.get("issue_id"))
.filter(
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(project__archived_at__isnull=True)
.select_related("workspace", "project", "issue", "actor")
.annotate(
is_member=Exists(
ProjectMember.objects.filter(
workspace__slug=self.kwargs.get("slug"),
project_id=self.kwargs.get("project_id"),
member_id=self.request.user.id,
is_active=True,
)
)
)
.order_by(self.kwargs.get("order_by", "-created_at"))
.distinct()
)
def get(self, request, slug, project_id, issue_id, pk=None):
if pk:
issue_comment = self.get_queryset().get(pk=pk)
serializer = IssueCommentSerializer(
issue_comment,
fields=self.fields,
expand=self.expand,
)
return Response(serializer.data, status=status.HTTP_200_OK)
return self.paginate(
request=request,
queryset=(self.get_queryset()),
on_results=lambda issue_comment: IssueCommentSerializer(
issue_comment,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def post(self, request, slug, project_id, issue_id):
# Check whether a comment with the same external id and external source already exists
if (
request.data.get("external_id")
and request.data.get("external_source")
and IssueComment.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).exists()
):
issue_comment = IssueComment.objects.filter(
workspace__slug=slug,
project_id=project_id,
external_id=request.data.get("external_id"),
external_source=request.data.get("external_source"),
).first()
return Response(
{
"error": "Issue Comment with the same external id and external source already exists",
"id": str(issue_comment.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer = IssueCommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(
project_id=project_id,
issue_id=issue_id,
actor=request.user,
)
issue_activity.delay(
type="comment.activity.created",
requested_data=json.dumps(
serializer.data, cls=DjangoJSONEncoder
),
actor_id=str(self.request.user.id),
issue_id=str(self.kwargs.get("issue_id")),
project_id=str(self.kwargs.get("project_id")),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def patch(self, request, slug, project_id, issue_id, pk):
issue_comment = IssueComment.objects.get(
workspace__slug=slug,
project_id=project_id,
issue_id=issue_id,
pk=pk,
)
requested_data = json.dumps(self.request.data, cls=DjangoJSONEncoder)
current_instance = json.dumps(
IssueCommentSerializer(issue_comment).data,
cls=DjangoJSONEncoder,
)
# Check whether another comment with the same external id and external source already exists
if (
request.data.get("external_id")
and (
issue_comment.external_id
!= str(request.data.get("external_id"))
)
and IssueComment.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get(
"external_source", issue_comment.external_source
),
external_id=request.data.get("external_id"),
).exists()
):
return Response(
{
"error": "Issue Comment with the same external id and external source already exists",
"id": str(issue_comment.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer = IssueCommentSerializer(
issue_comment, data=request.data, partial=True
)
if serializer.is_valid():
serializer.save()
issue_activity.delay(
type="comment.activity.updated",
requested_data=requested_data,
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, slug, project_id, issue_id, pk):
issue_comment = IssueComment.objects.get(
workspace__slug=slug,
project_id=project_id,
issue_id=issue_id,
pk=pk,
)
current_instance = json.dumps(
IssueCommentSerializer(issue_comment).data,
cls=DjangoJSONEncoder,
)
issue_comment.delete()
issue_activity.delay(
type="comment.activity.deleted",
requested_data=json.dumps({"comment_id": str(pk)}),
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=current_instance,
epoch=int(timezone.now().timestamp()),
)
return Response(status=status.HTTP_204_NO_CONTENT)
class IssueActivityAPIEndpoint(BaseAPIView):
permission_classes = [
ProjectEntityPermission,
]
def get(self, request, slug, project_id, issue_id, pk=None):
issue_activities = (
IssueActivity.objects.filter(
issue_id=issue_id, workspace__slug=slug, project_id=project_id
)
.filter(
~Q(field__in=["comment", "vote", "reaction", "draft"]),
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(project__archived_at__isnull=True)
.select_related("actor", "workspace", "issue", "project")
).order_by(request.GET.get("order_by", "created_at"))
if pk:
issue_activities = issue_activities.get(pk=pk)
serializer = IssueActivitySerializer(issue_activities)
return Response(serializer.data, status=status.HTTP_200_OK)
return self.paginate(
request=request,
queryset=(issue_activities),
on_results=lambda issue_activity: IssueActivitySerializer(
issue_activity,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
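The create and update handlers in this file share one guard: when `external_id` and `external_source` match an existing Issue, Label, or IssueComment, the API returns 409 Conflict along with the existing object's id instead of creating a duplicate. That makes imports safe to retry; a client-side sketch (URL, header, and ids are placeholders):
# Hypothetical idempotent-create helper for an importer: reuse the id the API
# reports back when the external_id/external_source pair already exists.
import requests

def upsert_issue(base, headers, slug, project_id, payload):
    url = f"{base}/workspaces/{slug}/projects/{project_id}/issues/"
    resp = requests.post(url, headers=headers, json=payload, timeout=10)
    if resp.status_code == 409:
        return resp.json()["id"]  # already created by a previous run
    resp.raise_for_status()
    return resp.json()["id"]

issue_id = upsert_issue(
    "https://plane.example.com/api/v1",
    {"X-API-Key": "<your-api-key>"},
    "my-workspace",
    "<project-id>",
    {"name": "Imported issue", "external_id": "GH-42", "external_source": "github"},
)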

View File

@@ -0,0 +1,590 @@
# Python imports
import json
# Django imports
from django.core import serializers
from django.db.models import Count, F, Func, OuterRef, Prefetch, Q
from django.utils import timezone
# Third party imports
from rest_framework import status
from rest_framework.response import Response
# Module imports
from plane.api.serializers import (
IssueSerializer,
ModuleIssueSerializer,
ModuleSerializer,
)
from plane.app.permissions import ProjectEntityPermission
from plane.bgtasks.issue_activites_task import issue_activity
from plane.db.models import (
Issue,
IssueAttachment,
IssueLink,
Module,
ModuleIssue,
ModuleLink,
Project,
)
from .base import BaseAPIView, WebhookMixin
class ModuleAPIEndpoint(WebhookMixin, BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to modules.
"""
model = Module
permission_classes = [
ProjectEntityPermission,
]
serializer_class = ModuleSerializer
webhook_event = "module"
def get_queryset(self):
return (
Module.objects.filter(project_id=self.kwargs.get("project_id"))
.filter(workspace__slug=self.kwargs.get("slug"))
.select_related("project")
.select_related("workspace")
.select_related("lead")
.prefetch_related("members")
.prefetch_related(
Prefetch(
"link_module",
queryset=ModuleLink.objects.select_related(
"module", "created_by"
),
)
)
.annotate(
total_issues=Count(
"issue_module",
filter=Q(
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
),
)
.annotate(
completed_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="completed",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
cancelled_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="cancelled",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
started_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="started",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
unstarted_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="unstarted",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
backlog_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="backlog",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.order_by(self.kwargs.get("order_by", "-created_at"))
)
def post(self, request, slug, project_id):
project = Project.objects.get(pk=project_id, workspace__slug=slug)
serializer = ModuleSerializer(
data=request.data,
context={
"project_id": project_id,
"workspace_id": project.workspace_id,
},
)
if serializer.is_valid():
if (
request.data.get("external_id")
and request.data.get("external_source")
and Module.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).exists()
):
module = Module.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).first()
return Response(
{
"error": "Module with the same external id and external source already exists",
"id": str(module.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
module = Module.objects.get(pk=serializer.data["id"])
serializer = ModuleSerializer(module)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def patch(self, request, slug, project_id, pk):
module = Module.objects.get(
pk=pk, project_id=project_id, workspace__slug=slug
)
if module.archived_at:
return Response(
{"error": "Archived module cannot be edited"},
status=status.HTTP_400_BAD_REQUEST,
)
serializer = ModuleSerializer(
module,
data=request.data,
context={"project_id": project_id},
partial=True,
)
if serializer.is_valid():
if (
request.data.get("external_id")
and (module.external_id != request.data.get("external_id"))
and Module.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get(
"external_source", module.external_source
),
external_id=request.data.get("external_id"),
).exists()
):
return Response(
{
"error": "Module with the same external id and external source already exists",
"id": str(module.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, slug, project_id, pk=None):
if pk:
queryset = (
self.get_queryset().filter(archived_at__isnull=True).get(pk=pk)
)
data = ModuleSerializer(
queryset,
fields=self.fields,
expand=self.expand,
).data
return Response(
data,
status=status.HTTP_200_OK,
)
return self.paginate(
request=request,
queryset=(self.get_queryset().filter(archived_at__isnull=True)),
on_results=lambda modules: ModuleSerializer(
modules,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def delete(self, request, slug, project_id, pk):
module = Module.objects.get(
workspace__slug=slug, project_id=project_id, pk=pk
)
module_issues = list(
ModuleIssue.objects.filter(module_id=pk).values_list(
"issue", flat=True
)
)
issue_activity.delay(
type="module.activity.deleted",
requested_data=json.dumps(
{
"module_id": str(pk),
"module_name": str(module.name),
"issues": [str(issue_id) for issue_id in module_issues],
}
),
actor_id=str(request.user.id),
issue_id=None,
project_id=str(project_id),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
module.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ModuleIssueAPIEndpoint(WebhookMixin, BaseAPIView):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions related to module issues.
"""
serializer_class = ModuleIssueSerializer
model = ModuleIssue
webhook_event = "module_issue"
bulk = True
permission_classes = [
ProjectEntityPermission,
]
def get_queryset(self):
return (
ModuleIssue.objects.annotate(
sub_issues_count=Issue.issue_objects.filter(
parent=OuterRef("issue")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.filter(workspace__slug=self.kwargs.get("slug"))
.filter(project_id=self.kwargs.get("project_id"))
.filter(module_id=self.kwargs.get("module_id"))
.filter(
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(project__archived_at__isnull=True)
.select_related("project")
.select_related("workspace")
.select_related("module")
.select_related("issue", "issue__state", "issue__project")
.prefetch_related("issue__assignees", "issue__labels")
.prefetch_related("module__members")
.order_by(self.kwargs.get("order_by", "-created_at"))
.distinct()
)
def get(self, request, slug, project_id, module_id):
order_by = request.GET.get("order_by", "created_at")
issues = (
Issue.issue_objects.filter(issue_module__module_id=module_id)
.annotate(
sub_issues_count=Issue.issue_objects.filter(
parent=OuterRef("id")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(bridge_id=F("issue_module__id"))
.filter(project_id=project_id)
.filter(workspace__slug=slug)
.select_related("project")
.select_related("workspace")
.select_related("state")
.select_related("parent")
.prefetch_related("assignees")
.prefetch_related("labels")
.order_by(order_by)
.annotate(
link_count=IssueLink.objects.filter(issue=OuterRef("id"))
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(
attachment_count=IssueAttachment.objects.filter(
issue=OuterRef("id")
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
)
return self.paginate(
request=request,
queryset=(issues),
on_results=lambda issues: IssueSerializer(
issues,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def post(self, request, slug, project_id, module_id):
issues = request.data.get("issues", [])
if not len(issues):
return Response(
{"error": "Issues are required"},
status=status.HTTP_400_BAD_REQUEST,
)
module = Module.objects.get(
workspace__slug=slug, project_id=project_id, pk=module_id
)
issues = Issue.objects.filter(
workspace__slug=slug, project_id=project_id, pk__in=issues
).values_list("id", flat=True)
module_issues = list(ModuleIssue.objects.filter(issue_id__in=issues))
update_module_issue_activity = []
records_to_update = []
record_to_create = []
for issue in issues:
module_issue = [
module_issue
for module_issue in module_issues
if module_issue.issue_id == issue
]
if len(module_issue):
if module_issue[0].module_id != module_id:
update_module_issue_activity.append(
{
"old_module_id": str(module_issue[0].module_id),
"new_module_id": str(module_id),
"issue_id": str(module_issue[0].issue_id),
}
)
module_issue[0].module_id = module_id
records_to_update.append(module_issue[0])
else:
record_to_create.append(
ModuleIssue(
module=module,
issue_id=issue,
project_id=project_id,
workspace=module.workspace,
created_by=request.user,
updated_by=request.user,
)
)
ModuleIssue.objects.bulk_create(
record_to_create,
batch_size=10,
ignore_conflicts=True,
)
ModuleIssue.objects.bulk_update(
records_to_update,
["module"],
batch_size=10,
)
# Capture Issue Activity
issue_activity.delay(
type="module.activity.created",
requested_data=json.dumps({"modules_list": str(issues)}),
actor_id=str(self.request.user.id),
issue_id=None,
project_id=str(self.kwargs.get("project_id", None)),
current_instance=json.dumps(
{
"updated_module_issues": update_module_issue_activity,
"created_module_issues": serializers.serialize(
"json", record_to_create
),
}
),
epoch=int(timezone.now().timestamp()),
)
return Response(
ModuleIssueSerializer(self.get_queryset(), many=True).data,
status=status.HTTP_200_OK,
)
def delete(self, request, slug, project_id, module_id, issue_id):
module_issue = ModuleIssue.objects.get(
workspace__slug=slug,
project_id=project_id,
module_id=module_id,
issue_id=issue_id,
)
module_issue.delete()
issue_activity.delay(
type="module.activity.deleted",
requested_data=json.dumps(
{
"module_id": str(module_id),
"issues": [str(module_issue.issue_id)],
}
),
actor_id=str(request.user.id),
issue_id=str(issue_id),
project_id=str(project_id),
current_instance=None,
epoch=int(timezone.now().timestamp()),
)
return Response(status=status.HTTP_204_NO_CONTENT)
class ModuleArchiveUnarchiveAPIEndpoint(BaseAPIView):
permission_classes = [
ProjectEntityPermission,
]
def get_queryset(self):
return (
Module.objects.filter(project_id=self.kwargs.get("project_id"))
.filter(workspace__slug=self.kwargs.get("slug"))
.filter(archived_at__isnull=False)
.select_related("project")
.select_related("workspace")
.select_related("lead")
.prefetch_related("members")
.prefetch_related(
Prefetch(
"link_module",
queryset=ModuleLink.objects.select_related(
"module", "created_by"
),
)
)
.annotate(
total_issues=Count(
"issue_module",
filter=Q(
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
),
)
.annotate(
completed_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="completed",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
cancelled_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="cancelled",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
started_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="started",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
unstarted_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="unstarted",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.annotate(
backlog_issues=Count(
"issue_module__issue__state__group",
filter=Q(
issue_module__issue__state__group="backlog",
issue_module__issue__archived_at__isnull=True,
issue_module__issue__is_draft=False,
),
distinct=True,
)
)
.order_by(self.kwargs.get("order_by", "-created_at"))
)
def get(self, request, slug, project_id, pk):
return self.paginate(
request=request,
queryset=(self.get_queryset()),
on_results=lambda modules: ModuleSerializer(
modules,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def post(self, request, slug, project_id, pk):
module = Module.objects.get(
pk=pk, project_id=project_id, workspace__slug=slug
)
if module.status not in ["completed", "cancelled"]:
return Response(
{
"error": "Only completed or cancelled modules can be archived"
},
status=status.HTTP_400_BAD_REQUEST,
)
module.archived_at = timezone.now()
module.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, slug, project_id, pk):
module = Module.objects.get(
pk=pk, project_id=project_id, workspace__slug=slug
)
module.archived_at = None
module.save()
return Response(status=status.HTTP_204_NO_CONTENT)
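`ModuleArchiveUnarchiveAPIEndpoint` toggles `archived_at`: POST archives a module (only when its status is completed or cancelled), DELETE restores it, and GET lists the archived ones. A client sketch, assuming the route mirrors the project archive route shown earlier in this diff (the module URL file is not part of this excerpt, so the path is an assumption):
# Hypothetical archive/unarchive calls for a module.
import requests

BASE = "https://plane.example.com/api/v1"      # assumed mount point
HEADERS = {"X-API-Key": "<your-api-key>"}      # assumed auth header
URL = (
    f"{BASE}/workspaces/my-workspace/projects/<project-id>/"
    "modules/<module-id>/archive/"             # assumed route
)

# Archive: rejected with 400 unless the module is completed or cancelled.
print(requests.post(URL, headers=HEADERS, timeout=10).status_code)

# Unarchive: clears archived_at.
print(requests.delete(URL, headers=HEADERS, timeout=10).status_code)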

View File

@@ -0,0 +1,350 @@
# Django imports
from django.db import IntegrityError
from django.db.models import Exists, F, Func, OuterRef, Prefetch, Q, Subquery
from django.utils import timezone
# Third party imports
from rest_framework import status
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from plane.api.serializers import ProjectSerializer
from plane.app.permissions import ProjectBasePermission
# Module imports
from plane.db.models import (
Cycle,
Inbox,
IssueProperty,
Module,
Project,
ProjectDeployBoard,
ProjectMember,
State,
Workspace,
)
from .base import BaseAPIView, WebhookMixin
class ProjectAPIEndpoint(WebhookMixin, BaseAPIView):
"""Project Endpoints to create, update, list, retrieve and delete endpoint"""
serializer_class = ProjectSerializer
model = Project
webhook_event = "project"
permission_classes = [
ProjectBasePermission,
]
def get_queryset(self):
return (
Project.objects.filter(workspace__slug=self.kwargs.get("slug"))
.filter(
Q(
project_projectmember__member=self.request.user,
project_projectmember__is_active=True,
)
| Q(network=2)
)
.select_related(
"workspace",
"workspace__owner",
"default_assignee",
"project_lead",
)
.annotate(
is_member=Exists(
ProjectMember.objects.filter(
member=self.request.user,
project_id=OuterRef("pk"),
workspace__slug=self.kwargs.get("slug"),
is_active=True,
)
)
)
.annotate(
total_members=ProjectMember.objects.filter(
project_id=OuterRef("id"),
member__is_bot=False,
is_active=True,
)
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(
total_cycles=Cycle.objects.filter(project_id=OuterRef("id"))
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(
total_modules=Module.objects.filter(project_id=OuterRef("id"))
.order_by()
.annotate(count=Func(F("id"), function="Count"))
.values("count")
)
.annotate(
member_role=ProjectMember.objects.filter(
project_id=OuterRef("pk"),
member_id=self.request.user.id,
is_active=True,
).values("role")
)
.annotate(
is_deployed=Exists(
ProjectDeployBoard.objects.filter(
project_id=OuterRef("pk"),
workspace__slug=self.kwargs.get("slug"),
)
)
)
.order_by(self.kwargs.get("order_by", "-created_at"))
.distinct()
)
def get(self, request, slug, pk=None):
if pk is None:
sort_order_query = ProjectMember.objects.filter(
member=request.user,
project_id=OuterRef("pk"),
workspace__slug=self.kwargs.get("slug"),
is_active=True,
).values("sort_order")
projects = (
self.get_queryset()
.annotate(sort_order=Subquery(sort_order_query))
.prefetch_related(
Prefetch(
"project_projectmember",
queryset=ProjectMember.objects.filter(
workspace__slug=slug,
is_active=True,
).select_related("member"),
)
)
.order_by(request.GET.get("order_by", "sort_order"))
)
return self.paginate(
request=request,
queryset=(projects),
on_results=lambda projects: ProjectSerializer(
projects,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
project = self.get_queryset().get(workspace__slug=slug, pk=pk)
serializer = ProjectSerializer(
project,
fields=self.fields,
expand=self.expand,
)
return Response(serializer.data, status=status.HTTP_200_OK)
def post(self, request, slug):
try:
workspace = Workspace.objects.get(slug=slug)
serializer = ProjectSerializer(
data={**request.data}, context={"workspace_id": workspace.id}
)
if serializer.is_valid():
serializer.save()
# Add the user as Administrator to the project
_ = ProjectMember.objects.create(
project_id=serializer.data["id"],
member=request.user,
role=20,
)
# Also create the issue property for the user
_ = IssueProperty.objects.create(
project_id=serializer.data["id"],
user=request.user,
)
if serializer.data["project_lead"] is not None and str(
serializer.data["project_lead"]
) != str(request.user.id):
ProjectMember.objects.create(
project_id=serializer.data["id"],
member_id=serializer.data["project_lead"],
role=20,
)
# Also create the issue property for the user
IssueProperty.objects.create(
project_id=serializer.data["id"],
user_id=serializer.data["project_lead"],
)
# Default states
states = [
{
"name": "Backlog",
"color": "#A3A3A3",
"sequence": 15000,
"group": "backlog",
"default": True,
},
{
"name": "Todo",
"color": "#3A3A3A",
"sequence": 25000,
"group": "unstarted",
},
{
"name": "In Progress",
"color": "#F59E0B",
"sequence": 35000,
"group": "started",
},
{
"name": "Done",
"color": "#16A34A",
"sequence": 45000,
"group": "completed",
},
{
"name": "Cancelled",
"color": "#EF4444",
"sequence": 55000,
"group": "cancelled",
},
]
State.objects.bulk_create(
[
State(
name=state["name"],
color=state["color"],
project=serializer.instance,
sequence=state["sequence"],
workspace=serializer.instance.workspace,
group=state["group"],
default=state.get("default", False),
created_by=request.user,
)
for state in states
]
)
project = (
self.get_queryset()
.filter(pk=serializer.data["id"])
.first()
)
serializer = ProjectSerializer(project)
return Response(
serializer.data, status=status.HTTP_201_CREATED
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
except IntegrityError as e:
if "already exists" in str(e):
return Response(
{"name": "The project name is already taken"},
status=status.HTTP_410_GONE,
)
except Workspace.DoesNotExist:
return Response(
{"error": "Workspace does not exist"},
status=status.HTTP_404_NOT_FOUND,
)
except ValidationError:
return Response(
{"identifier": "The project identifier is already taken"},
status=status.HTTP_410_GONE,
)
def patch(self, request, slug, pk):
try:
workspace = Workspace.objects.get(slug=slug)
project = Project.objects.get(pk=pk)
if project.archived_at:
return Response(
{"error": "Archived project cannot be updated"},
status=status.HTTP_400_BAD_REQUEST,
)
serializer = ProjectSerializer(
project,
data={**request.data},
context={"workspace_id": workspace.id},
partial=True,
)
if serializer.is_valid():
serializer.save()
if serializer.data["inbox_view"]:
Inbox.objects.get_or_create(
name=f"{project.name} Inbox",
project=project,
is_default=True,
)
# Create the triage state in Backlog group
State.objects.get_or_create(
name="Triage",
group="triage",
description="Default state for managing all Inbox Issues",
project_id=pk,
color="#ff7700",
is_triage=True,
)
project = (
self.get_queryset()
.filter(pk=serializer.data["id"])
.first()
)
serializer = ProjectSerializer(project)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
except IntegrityError as e:
if "already exists" in str(e):
return Response(
{"name": "The project name is already taken"},
status=status.HTTP_410_GONE,
)
except (Project.DoesNotExist, Workspace.DoesNotExist):
return Response(
{"error": "Project does not exist"},
status=status.HTTP_404_NOT_FOUND,
)
except ValidationError:
return Response(
{"identifier": "The project identifier is already taken"},
status=status.HTTP_410_GONE,
)
def delete(self, request, slug, pk):
project = Project.objects.get(pk=pk, workspace__slug=slug)
project.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ProjectArchiveUnarchiveAPIEndpoint(BaseAPIView):
permission_classes = [
ProjectBasePermission,
]
def post(self, request, slug, project_id):
project = Project.objects.get(pk=project_id, workspace__slug=slug)
project.archived_at = timezone.now()
project.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def delete(self, request, slug, project_id):
project = Project.objects.get(pk=project_id, workspace__slug=slug)
project.archived_at = None
project.save()
return Response(status=status.HTTP_204_NO_CONTENT)
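For illustration, a minimal client-side sketch of driving the archive/unarchive endpoint above with the requests library. The X-Api-Key header name comes from the API-key authentication class later in this diff; the base URL and the exact route are assumptions for the example and may not match the project's URL configuration.
# Hypothetical client sketch; BASE_URL and the route layout are assumptions.
import requests

BASE_URL = "https://plane.example.com/api/v1"  # assumed deployment URL
HEADERS = {"X-Api-Key": "plane_api_xxxxxxxx"}  # header name matches APIKeyAuthentication

def archive_project(slug: str, project_id: str) -> None:
    # POST marks the project as archived (sets archived_at = now)
    resp = requests.post(
        f"{BASE_URL}/workspaces/{slug}/projects/{project_id}/archive/",
        headers=HEADERS,
    )
    resp.raise_for_status()  # 204 No Content on success

def unarchive_project(slug: str, project_id: str) -> None:
    # DELETE clears archived_at and restores the project
    resp = requests.delete(
        f"{BASE_URL}/workspaces/{slug}/projects/{project_id}/archive/",
        headers=HEADERS,
    )
    resp.raise_for_status()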

View File

@@ -0,0 +1,161 @@
# Django imports
from django.db import IntegrityError
# Third party imports
from rest_framework import status
from rest_framework.response import Response
from plane.api.serializers import StateSerializer
from plane.app.permissions import ProjectEntityPermission
from plane.db.models import Issue, State
# Module imports
from .base import BaseAPIView
class StateAPIEndpoint(BaseAPIView):
serializer_class = StateSerializer
model = State
permission_classes = [
ProjectEntityPermission,
]
def get_queryset(self):
return (
State.objects.filter(workspace__slug=self.kwargs.get("slug"))
.filter(project_id=self.kwargs.get("project_id"))
.filter(
project__project_projectmember__member=self.request.user,
project__project_projectmember__is_active=True,
)
.filter(is_triage=False)
.filter(project__archived_at__isnull=True)
.select_related("project")
.select_related("workspace")
.distinct()
)
def post(self, request, slug, project_id):
try:
serializer = StateSerializer(
data=request.data, context={"project_id": project_id}
)
if serializer.is_valid():
if (
request.data.get("external_id")
and request.data.get("external_source")
and State.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get("external_source"),
external_id=request.data.get("external_id"),
).exists()
):
state = State.objects.filter(
workspace__slug=slug,
project_id=project_id,
external_id=request.data.get("external_id"),
external_source=request.data.get("external_source"),
).first()
return Response(
{
"error": "State with the same external id and external source already exists",
"id": str(state.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save(project_id=project_id)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
except IntegrityError:
state = State.objects.filter(
workspace__slug=slug,
project_id=project_id,
name=request.data.get("name"),
).first()
return Response(
{
"error": "State with the same name already exists in the project",
"id": str(state.id),
},
status=status.HTTP_409_CONFLICT,
)
def get(self, request, slug, project_id, state_id=None):
if state_id:
serializer = StateSerializer(
self.get_queryset().get(pk=state_id),
fields=self.fields,
expand=self.expand,
)
return Response(serializer.data, status=status.HTTP_200_OK)
return self.paginate(
request=request,
queryset=(self.get_queryset()),
on_results=lambda states: StateSerializer(
states,
many=True,
fields=self.fields,
expand=self.expand,
).data,
)
def delete(self, request, slug, project_id, state_id):
state = State.objects.get(
is_triage=False,
pk=state_id,
project_id=project_id,
workspace__slug=slug,
)
if state.default:
return Response(
{"error": "Default state cannot be deleted"},
status=status.HTTP_400_BAD_REQUEST,
)
# Check for any issues in the state
issue_exist = Issue.issue_objects.filter(state=state_id).exists()
if issue_exist:
return Response(
{
"error": "The state is not empty, only empty states can be deleted"
},
status=status.HTTP_400_BAD_REQUEST,
)
state.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def patch(self, request, slug, project_id, state_id=None):
state = State.objects.get(
workspace__slug=slug, project_id=project_id, pk=state_id
)
serializer = StateSerializer(state, data=request.data, partial=True)
if serializer.is_valid():
if (
str(request.data.get("external_id"))
and (state.external_id != str(request.data.get("external_id")))
and State.objects.filter(
project_id=project_id,
workspace__slug=slug,
external_source=request.data.get(
"external_source", state.external_source
),
external_id=request.data.get("external_id"),
).exists()
):
return Response(
{
"error": "State with the same external id and external source already exists",
"id": str(state.id),
},
status=status.HTTP_409_CONFLICT,
)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
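A hedged example of exercising the external-id deduplication above from a client: when a state with the same external_source/external_id (or the same name) already exists, the endpoint answers 409 and includes the existing state's id, which the caller can reuse. The URL path and credentials below are assumptions for illustration.
# Hypothetical client sketch; the route and token are assumptions.
import requests

BASE_URL = "https://plane.example.com/api/v1"  # assumed
HEADERS = {"X-Api-Key": "plane_api_xxxxxxxx"}  # assumed token

def get_or_create_state(slug, project_id, name, external_source, external_id):
    resp = requests.post(
        f"{BASE_URL}/workspaces/{slug}/projects/{project_id}/states/",
        headers=HEADERS,
        json={
            "name": name,
            "group": "unstarted",
            "external_source": external_source,  # e.g. an importer name
            "external_id": external_id,
        },
    )
    if resp.status_code == 409:
        # Duplicate external id or name: the response body carries the existing id.
        return resp.json()["id"]
    resp.raise_for_status()
    return resp.json()["id"]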

View File

@@ -0,0 +1,50 @@
# Django imports
from django.utils import timezone
from django.db.models import Q
# Third party imports
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
# Module imports
from plane.db.models import APIToken
class APIKeyAuthentication(authentication.BaseAuthentication):
"""
Authentication with an API Key
"""
www_authenticate_realm = "api"
media_type = "application/json"
auth_header_name = "X-Api-Key"
def get_api_token(self, request):
return request.headers.get(self.auth_header_name)
def validate_api_token(self, token):
try:
api_token = APIToken.objects.get(
Q(
Q(expired_at__gt=timezone.now())
| Q(expired_at__isnull=True)
),
token=token,
is_active=True,
)
except APIToken.DoesNotExist:
raise AuthenticationFailed("Given API token is not valid")
# save api token last used
api_token.last_used = timezone.now()
api_token.save(update_fields=["last_used"])
return (api_token.user, api_token.token)
def authenticate(self, request):
token = self.get_api_token(request=request)
if not token:
return None
# Validate the API token
user, token = self.validate_api_token(token)
return user, token
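To show where this class plugs in, here is a minimal sketch of wiring it into Django REST Framework's settings and the request shape it expects. The dotted import path below is an assumption based on this diff's layout.
# settings.py (sketch): register the API-key authenticator globally.
# The dotted path is assumed from this diff's file layout.
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "plane.api.authentication.APIKeyAuthentication",
    ],
}

# A request then only needs the X-Api-Key header; an expired or inactive
# token is rejected with AuthenticationFailed.
#
#   curl -H "X-Api-Key: <token>" https://plane.example.com/api/v1/...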

View File

@@ -0,0 +1,14 @@
from .workspace import (
WorkSpaceBasePermission,
WorkspaceOwnerPermission,
WorkSpaceAdminPermission,
WorkspaceEntityPermission,
WorkspaceViewerPermission,
WorkspaceUserPermission,
)
from .project import (
ProjectBasePermission,
ProjectEntityPermission,
ProjectMemberPermission,
ProjectLitePermission,
)

View File

@@ -0,0 +1,111 @@
# Third Party imports
from rest_framework.permissions import SAFE_METHODS, BasePermission
# Module import
from plane.db.models import ProjectMember, WorkspaceMember
# Permission Mappings
Admin = 20
Member = 15
Viewer = 10
Guest = 5
class ProjectBasePermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
## Safe Methods -> Handle the filtering logic in queryset
if request.method in SAFE_METHODS:
return WorkspaceMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
is_active=True,
).exists()
## Only workspace owners or admins can create the projects
if request.method == "POST":
return WorkspaceMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role__in=[Admin, Member],
is_active=True,
).exists()
## Only Project Admins can update project attributes
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role=Admin,
project_id=view.project_id,
is_active=True,
).exists()
class ProjectMemberPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
## Safe Methods -> Handle the filtering logic in queryset
if request.method in SAFE_METHODS:
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
is_active=True,
).exists()
## Only workspace owners or admins can create the projects
if request.method == "POST":
return WorkspaceMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role__in=[Admin, Member],
is_active=True,
).exists()
## Only Project Admins can update project attributes
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role__in=[Admin, Member],
project_id=view.project_id,
is_active=True,
).exists()
class ProjectEntityPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
## Safe Methods -> Handle the filtering logic in queryset
if request.method in SAFE_METHODS:
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
project_id=view.project_id,
is_active=True,
).exists()
## Only project members or admins can create and edit the project attributes
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role__in=[Admin, Member],
project_id=view.project_id,
is_active=True,
).exists()
class ProjectLitePermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return ProjectMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
project_id=view.project_id,
is_active=True,
).exists()
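These permission classes read view.workspace_slug and view.project_id rather than request data, so any view that uses them has to expose those attributes. A minimal sketch of such a view follows; it derives both values from the URL kwargs, the same way the views earlier in this diff read self.kwargs.
# Sketch only: exposes the attributes the permission classes above expect.
from rest_framework.response import Response
from rest_framework.views import APIView

from plane.app.permissions import ProjectEntityPermission

class ExampleProjectView(APIView):
    permission_classes = [ProjectEntityPermission]

    @property
    def workspace_slug(self):
        return self.kwargs.get("slug")

    @property
    def project_id(self):
        return self.kwargs.get("project_id")

    def get(self, request, slug, project_id):
        # Reaching this point means the requester is an active project member.
        return Response({"ok": True})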

View File

@@ -0,0 +1,115 @@
# Third Party imports
from rest_framework.permissions import BasePermission, SAFE_METHODS
# Module imports
from plane.db.models import WorkspaceMember
# Permission Mappings
Owner = 20
Admin = 15
Member = 10
Guest = 5
# TODO: Move the below logic to python match - python v3.10
class WorkSpaceBasePermission(BasePermission):
def has_permission(self, request, view):
# allow anyone to create a workspace
if request.user.is_anonymous:
return False
if request.method == "POST":
return True
## Safe Methods
if request.method in SAFE_METHODS:
return True
# allow only admins and owners to update the workspace settings
if request.method in ["PUT", "PATCH"]:
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
role__in=[Owner, Admin],
is_active=True,
).exists()
# allow only owner to delete the workspace
if request.method == "DELETE":
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
role=Owner,
is_active=True,
).exists()
class WorkspaceOwnerPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return WorkspaceMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
role=Owner,
).exists()
class WorkSpaceAdminPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
role__in=[Owner, Admin],
is_active=True,
).exists()
class WorkspaceEntityPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
## Safe Methods -> Handle the filtering logic in queryset
if request.method in SAFE_METHODS:
return WorkspaceMember.objects.filter(
workspace__slug=view.workspace_slug,
member=request.user,
is_active=True,
).exists()
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
role__in=[Owner, Admin],
is_active=True,
).exists()
class WorkspaceViewerPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
is_active=True,
).exists()
class WorkspaceUserPermission(BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return WorkspaceMember.objects.filter(
member=request.user,
workspace__slug=view.workspace_slug,
is_active=True,
).exists()

View File

@@ -0,0 +1,126 @@
from .base import BaseSerializer
from .user import (
UserSerializer,
UserLiteSerializer,
ChangePasswordSerializer,
ResetPasswordSerializer,
UserAdminLiteSerializer,
UserMeSerializer,
UserMeSettingsSerializer,
)
from .workspace import (
WorkSpaceSerializer,
WorkSpaceMemberSerializer,
TeamSerializer,
WorkSpaceMemberInviteSerializer,
WorkspaceLiteSerializer,
WorkspaceThemeSerializer,
WorkspaceMemberAdminSerializer,
WorkspaceMemberMeSerializer,
WorkspaceUserPropertiesSerializer,
)
from .project import (
ProjectSerializer,
ProjectListSerializer,
ProjectDetailSerializer,
ProjectMemberSerializer,
ProjectMemberInviteSerializer,
ProjectIdentifierSerializer,
ProjectFavoriteSerializer,
ProjectLiteSerializer,
ProjectMemberLiteSerializer,
ProjectDeployBoardSerializer,
ProjectMemberAdminSerializer,
ProjectPublicMemberSerializer,
ProjectMemberRoleSerializer,
)
from .state import StateSerializer, StateLiteSerializer
from .view import (
GlobalViewSerializer,
IssueViewSerializer,
IssueViewFavoriteSerializer,
)
from .cycle import (
CycleSerializer,
CycleIssueSerializer,
CycleFavoriteSerializer,
CycleWriteSerializer,
CycleUserPropertiesSerializer,
)
from .asset import FileAssetSerializer
from .issue import (
IssueCreateSerializer,
IssueActivitySerializer,
IssueCommentSerializer,
IssuePropertySerializer,
IssueAssigneeSerializer,
LabelSerializer,
IssueSerializer,
IssueFlatSerializer,
IssueStateSerializer,
IssueLinkSerializer,
IssueInboxSerializer,
IssueLiteSerializer,
IssueAttachmentSerializer,
IssueSubscriberSerializer,
IssueReactionSerializer,
CommentReactionSerializer,
IssueVoteSerializer,
IssueRelationSerializer,
RelatedIssueSerializer,
IssuePublicSerializer,
IssueDetailSerializer,
IssueReactionLiteSerializer,
IssueAttachmentLiteSerializer,
IssueLinkLiteSerializer,
)
from .module import (
ModuleDetailSerializer,
ModuleWriteSerializer,
ModuleSerializer,
ModuleIssueSerializer,
ModuleLinkSerializer,
ModuleFavoriteSerializer,
ModuleUserPropertiesSerializer,
)
from .api import APITokenSerializer, APITokenReadSerializer
from .importer import ImporterSerializer
from .page import (
PageSerializer,
PageLogSerializer,
SubPageSerializer,
PageDetailSerializer,
PageFavoriteSerializer,
)
from .estimate import (
EstimateSerializer,
EstimatePointSerializer,
EstimateReadSerializer,
WorkspaceEstimateSerializer,
)
from .inbox import (
InboxSerializer,
InboxIssueSerializer,
IssueStateInboxSerializer,
InboxIssueLiteSerializer,
InboxIssueDetailSerializer,
)
from .analytic import AnalyticViewSerializer
from .notification import (
NotificationSerializer,
UserNotificationPreferenceSerializer,
)
from .exporter import ExporterHistorySerializer
from .webhook import WebhookSerializer, WebhookLogSerializer
from .dashboard import DashboardSerializer, WidgetSerializer

View File

@@ -0,0 +1,30 @@
from .base import BaseSerializer
from plane.db.models import AnalyticView
from plane.utils.issue_filters import issue_filters
class AnalyticViewSerializer(BaseSerializer):
class Meta:
model = AnalyticView
fields = "__all__"
read_only_fields = [
"workspace",
"query",
]
def create(self, validated_data):
query_params = validated_data.get("query_dict", {})
if bool(query_params):
validated_data["query"] = issue_filters(query_params, "POST")
else:
validated_data["query"] = {}
return AnalyticView.objects.create(**validated_data)
    def update(self, instance, validated_data):
        query_params = validated_data.get("query_data", {})
        if bool(query_params):
            validated_data["query"] = issue_filters(query_params, "PATCH")
        else:
            validated_data["query"] = {}
        return super().update(instance, validated_data)

View File

@@ -0,0 +1,28 @@
from .base import BaseSerializer
from plane.db.models import APIToken, APIActivityLog
class APITokenSerializer(BaseSerializer):
class Meta:
model = APIToken
fields = "__all__"
read_only_fields = [
"token",
"expired_at",
"created_at",
"updated_at",
"workspace",
"user",
]
class APITokenReadSerializer(BaseSerializer):
class Meta:
model = APIToken
exclude = ("token",)
class APIActivityLogSerializer(BaseSerializer):
class Meta:
model = APIActivityLog
fields = "__all__"

View File

@@ -0,0 +1,14 @@
from .base import BaseSerializer
from plane.db.models import FileAsset
class FileAssetSerializer(BaseSerializer):
class Meta:
model = FileAsset
fields = "__all__"
read_only_fields = [
"created_by",
"updated_by",
"created_at",
"updated_at",
]

View File

@@ -0,0 +1,181 @@
from rest_framework import serializers
class BaseSerializer(serializers.ModelSerializer):
id = serializers.PrimaryKeyRelatedField(read_only=True)
class DynamicBaseSerializer(BaseSerializer):
def __init__(self, *args, **kwargs):
# If 'fields' is provided in the arguments, remove it and store it separately.
# This is done so as not to pass this custom argument up to the superclass.
fields = kwargs.pop("fields", [])
self.expand = kwargs.pop("expand", []) or []
fields = self.expand
# Call the initialization of the superclass.
super().__init__(*args, **kwargs)
# If 'fields' was provided, filter the fields of the serializer accordingly.
if fields is not None:
self.fields = self._filter_fields(fields)
def _filter_fields(self, fields):
"""
Adjust the serializer's fields based on the provided 'fields' list.
:param fields: List or dictionary specifying which fields to include in the serializer.
:return: The updated fields for the serializer.
"""
# Check each field_name in the provided fields.
for field_name in fields:
# If the field is a dictionary (indicating nested fields),
# loop through its keys and values.
if isinstance(field_name, dict):
for key, value in field_name.items():
# If the value of this nested field is a list,
# perform a recursive filter on it.
if isinstance(value, list):
self._filter_fields(self.fields[key], value)
# Create a list to store allowed fields.
allowed = []
for item in fields:
# If the item is a string, it directly represents a field's name.
if isinstance(item, str):
allowed.append(item)
# If the item is a dictionary, it represents a nested field.
# Add the key of this dictionary to the allowed list.
elif isinstance(item, dict):
allowed.append(list(item.keys())[0])
for field in allowed:
if field not in self.fields:
from . import (
WorkspaceLiteSerializer,
ProjectLiteSerializer,
UserLiteSerializer,
StateLiteSerializer,
IssueSerializer,
LabelSerializer,
CycleIssueSerializer,
IssueLiteSerializer,
IssueRelationSerializer,
InboxIssueLiteSerializer,
IssueReactionLiteSerializer,
IssueAttachmentLiteSerializer,
IssueLinkLiteSerializer,
)
# Expansion mapper
expansion = {
"user": UserLiteSerializer,
"workspace": WorkspaceLiteSerializer,
"project": ProjectLiteSerializer,
"default_assignee": UserLiteSerializer,
"project_lead": UserLiteSerializer,
"state": StateLiteSerializer,
"created_by": UserLiteSerializer,
"issue": IssueSerializer,
"actor": UserLiteSerializer,
"owned_by": UserLiteSerializer,
"members": UserLiteSerializer,
"assignees": UserLiteSerializer,
"labels": LabelSerializer,
"issue_cycle": CycleIssueSerializer,
"parent": IssueLiteSerializer,
"issue_relation": IssueRelationSerializer,
"issue_inbox": InboxIssueLiteSerializer,
"issue_reactions": IssueReactionLiteSerializer,
"issue_attachment": IssueAttachmentLiteSerializer,
"issue_link": IssueLinkLiteSerializer,
"sub_issues": IssueLiteSerializer,
}
self.fields[field] = expansion[field](
many=(
True
if field
in [
"members",
"assignees",
"labels",
"issue_cycle",
"issue_relation",
"issue_inbox",
"issue_reactions",
"issue_attachment",
"issue_link",
"sub_issues",
]
else False
)
)
return self.fields
def to_representation(self, instance):
response = super().to_representation(instance)
# Ensure 'expand' is iterable before processing
if self.expand:
for expand in self.expand:
if expand in self.fields:
# Import all the expandable serializers
from . import (
WorkspaceLiteSerializer,
ProjectLiteSerializer,
UserLiteSerializer,
StateLiteSerializer,
IssueSerializer,
LabelSerializer,
CycleIssueSerializer,
IssueRelationSerializer,
InboxIssueLiteSerializer,
IssueLiteSerializer,
IssueReactionLiteSerializer,
IssueAttachmentLiteSerializer,
IssueLinkLiteSerializer,
)
# Expansion mapper
expansion = {
"user": UserLiteSerializer,
"workspace": WorkspaceLiteSerializer,
"project": ProjectLiteSerializer,
"default_assignee": UserLiteSerializer,
"project_lead": UserLiteSerializer,
"state": StateLiteSerializer,
"created_by": UserLiteSerializer,
"issue": IssueSerializer,
"actor": UserLiteSerializer,
"owned_by": UserLiteSerializer,
"members": UserLiteSerializer,
"assignees": UserLiteSerializer,
"labels": LabelSerializer,
"issue_cycle": CycleIssueSerializer,
"parent": IssueLiteSerializer,
"issue_relation": IssueRelationSerializer,
"issue_inbox": InboxIssueLiteSerializer,
"issue_reactions": IssueReactionLiteSerializer,
"issue_attachment": IssueAttachmentLiteSerializer,
"issue_link": IssueLinkLiteSerializer,
"sub_issues": IssueLiteSerializer,
}
# Check if field in expansion then expand the field
if expand in expansion:
if isinstance(response.get(expand), list):
exp_serializer = expansion[expand](
getattr(instance, expand), many=True
)
else:
exp_serializer = expansion[expand](
getattr(instance, expand)
)
response[expand] = exp_serializer.data
else:
# You might need to handle this case differently
response[expand] = getattr(
instance, f"{expand}_id", None
)
return response
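For context on how the expand kwarg is consumed, here is a small usage sketch with the IssueSerializer defined later in this diff (it inherits DynamicBaseSerializer). Passing expand=[...] makes _filter_fields register a nested serializer for each named relation, and to_representation then swaps the raw id for the nested payload. The queryset is a placeholder.
# Usage sketch; `issues` stands in for any Issue queryset.
from plane.app.serializers import IssueSerializer
from plane.db.models import Issue

issues = Issue.objects.all()[:10]

# Plain output: related objects appear as ids only.
plain = IssueSerializer(issues, many=True)

# Expanded output: "state" is rendered through StateLiteSerializer
# (see the expansion mapper above).
expanded = IssueSerializer(issues, many=True, expand=["state"])

print(expanded.data[0]["state"])  # nested state dict instead of a bare id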

View File

@@ -0,0 +1,118 @@
# Third party imports
from rest_framework import serializers
# Module imports
from .base import BaseSerializer
from .issue import IssueStateSerializer
from plane.db.models import (
Cycle,
CycleIssue,
CycleFavorite,
CycleUserProperties,
)
class CycleWriteSerializer(BaseSerializer):
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("end_date", None) is not None
and data.get("start_date", None) > data.get("end_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed end date"
)
return data
class Meta:
model = Cycle
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"owned_by",
"archived_at",
]
class CycleSerializer(BaseSerializer):
# favorite
is_favorite = serializers.BooleanField(read_only=True)
total_issues = serializers.IntegerField(read_only=True)
# state group wise distribution
cancelled_issues = serializers.IntegerField(read_only=True)
completed_issues = serializers.IntegerField(read_only=True)
started_issues = serializers.IntegerField(read_only=True)
unstarted_issues = serializers.IntegerField(read_only=True)
backlog_issues = serializers.IntegerField(read_only=True)
# active | draft | upcoming | completed
status = serializers.CharField(read_only=True)
class Meta:
model = Cycle
fields = [
# necessary fields
"id",
"workspace_id",
"project_id",
# model fields
"name",
"description",
"start_date",
"end_date",
"owned_by_id",
"view_props",
"sort_order",
"external_source",
"external_id",
"progress_snapshot",
# meta fields
"is_favorite",
"total_issues",
"cancelled_issues",
"completed_issues",
"started_issues",
"unstarted_issues",
"backlog_issues",
"status",
]
read_only_fields = fields
class CycleIssueSerializer(BaseSerializer):
issue_detail = IssueStateSerializer(read_only=True, source="issue")
sub_issues_count = serializers.IntegerField(read_only=True)
class Meta:
model = CycleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"cycle",
]
class CycleFavoriteSerializer(BaseSerializer):
cycle_detail = CycleSerializer(source="cycle", read_only=True)
class Meta:
model = CycleFavorite
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"user",
]
class CycleUserPropertiesSerializer(BaseSerializer):
class Meta:
model = CycleUserProperties
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"cycle" "user",
]
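A brief sketch of how the date validation in CycleWriteSerializer surfaces to callers: is_valid() returns False and the message lands in serializer.errors rather than raising out of the view. Field values are illustrative.
# Illustrative only: a cycle whose start date is after its end date.
from plane.app.serializers import CycleWriteSerializer

serializer = CycleWriteSerializer(
    data={
        "name": "Sprint 12",
        "start_date": "2024-05-10",
        "end_date": "2024-05-01",
    }
)
assert not serializer.is_valid()
# serializer.errors includes the non-field error above
# (assuming the remaining fields validate).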

View File

@@ -0,0 +1,21 @@
# Module imports
from .base import BaseSerializer
from plane.db.models import Dashboard, Widget
# Third party frameworks
from rest_framework import serializers
class DashboardSerializer(BaseSerializer):
class Meta:
model = Dashboard
fields = "__all__"
class WidgetSerializer(BaseSerializer):
is_visible = serializers.BooleanField(read_only=True)
widget_filters = serializers.JSONField(read_only=True)
class Meta:
model = Widget
fields = ["id", "key", "is_visible", "widget_filters"]

View File

@@ -0,0 +1,76 @@
# Module imports
from .base import BaseSerializer
from plane.db.models import Estimate, EstimatePoint
from plane.app.serializers import (
WorkspaceLiteSerializer,
ProjectLiteSerializer,
)
from rest_framework import serializers
class EstimateSerializer(BaseSerializer):
workspace_detail = WorkspaceLiteSerializer(
read_only=True, source="workspace"
)
project_detail = ProjectLiteSerializer(read_only=True, source="project")
class Meta:
model = Estimate
fields = "__all__"
read_only_fields = [
"workspace",
"project",
]
class EstimatePointSerializer(BaseSerializer):
def validate(self, data):
if not data:
raise serializers.ValidationError("Estimate points are required")
value = data.get("value")
if value and len(value) > 20:
raise serializers.ValidationError(
"Value can't be more than 20 characters"
)
return data
class Meta:
model = EstimatePoint
fields = "__all__"
read_only_fields = [
"estimate",
"workspace",
"project",
]
class EstimateReadSerializer(BaseSerializer):
points = EstimatePointSerializer(read_only=True, many=True)
workspace_detail = WorkspaceLiteSerializer(
read_only=True, source="workspace"
)
project_detail = ProjectLiteSerializer(read_only=True, source="project")
class Meta:
model = Estimate
fields = "__all__"
read_only_fields = [
"points",
"name",
"description",
]
class WorkspaceEstimateSerializer(BaseSerializer):
points = EstimatePointSerializer(read_only=True, many=True)
class Meta:
model = Estimate
fields = "__all__"
read_only_fields = [
"points",
"name",
"description",
]

View File

@@ -0,0 +1,28 @@
# Module imports
from .base import BaseSerializer
from plane.db.models import ExporterHistory
from .user import UserLiteSerializer
class ExporterHistorySerializer(BaseSerializer):
initiated_by_detail = UserLiteSerializer(
source="initiated_by", read_only=True
)
class Meta:
model = ExporterHistory
fields = [
"id",
"created_at",
"updated_at",
"project",
"provider",
"status",
"url",
"initiated_by",
"initiated_by_detail",
"token",
"created_by",
"updated_by",
]
read_only_fields = fields

View File

@@ -0,0 +1,20 @@
# Module imports
from .base import BaseSerializer
from .user import UserLiteSerializer
from .project import ProjectLiteSerializer
from .workspace import WorkspaceLiteSerializer
from plane.db.models import Importer
class ImporterSerializer(BaseSerializer):
initiated_by_detail = UserLiteSerializer(
source="initiated_by", read_only=True
)
project_detail = ProjectLiteSerializer(source="project", read_only=True)
workspace_detail = WorkspaceLiteSerializer(
source="workspace", read_only=True
)
class Meta:
model = Importer
fields = "__all__"

View File

@@ -0,0 +1,109 @@
# Third party frameworks
from rest_framework import serializers
# Module imports
from .base import BaseSerializer
from .issue import (
IssueInboxSerializer,
LabelLiteSerializer,
IssueDetailSerializer,
)
from .project import ProjectLiteSerializer
from .state import StateLiteSerializer
from .user import UserLiteSerializer
from plane.db.models import Inbox, InboxIssue, Issue
class InboxSerializer(BaseSerializer):
project_detail = ProjectLiteSerializer(source="project", read_only=True)
pending_issue_count = serializers.IntegerField(read_only=True)
class Meta:
model = Inbox
fields = "__all__"
read_only_fields = [
"project",
"workspace",
]
class InboxIssueSerializer(BaseSerializer):
issue = IssueInboxSerializer(read_only=True)
class Meta:
model = InboxIssue
fields = [
"id",
"status",
"duplicate_to",
"snoozed_till",
"source",
"issue",
"created_by",
]
read_only_fields = [
"project",
"workspace",
]
def to_representation(self, instance):
# Pass the annotated fields to the Issue instance if they exist
if hasattr(instance, "label_ids"):
instance.issue.label_ids = instance.label_ids
return super().to_representation(instance)
class InboxIssueDetailSerializer(BaseSerializer):
issue = IssueDetailSerializer(read_only=True)
duplicate_issue_detail = IssueInboxSerializer(
read_only=True, source="duplicate_to"
)
class Meta:
model = InboxIssue
fields = [
"id",
"status",
"duplicate_to",
"snoozed_till",
"duplicate_issue_detail",
"source",
"issue",
]
read_only_fields = [
"project",
"workspace",
]
def to_representation(self, instance):
# Pass the annotated fields to the Issue instance if they exist
if hasattr(instance, "assignee_ids"):
instance.issue.assignee_ids = instance.assignee_ids
if hasattr(instance, "label_ids"):
instance.issue.label_ids = instance.label_ids
return super().to_representation(instance)
class InboxIssueLiteSerializer(BaseSerializer):
class Meta:
model = InboxIssue
fields = ["id", "status", "duplicate_to", "snoozed_till", "source"]
read_only_fields = fields
class IssueStateInboxSerializer(BaseSerializer):
state_detail = StateLiteSerializer(read_only=True, source="state")
project_detail = ProjectLiteSerializer(read_only=True, source="project")
label_details = LabelLiteSerializer(
read_only=True, source="labels", many=True
)
assignee_details = UserLiteSerializer(
read_only=True, source="assignees", many=True
)
sub_issues_count = serializers.IntegerField(read_only=True)
issue_inbox = InboxIssueLiteSerializer(read_only=True, many=True)
class Meta:
model = Issue
fields = "__all__"

View File

@@ -0,0 +1,757 @@
# Django imports
from django.utils import timezone
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
# Third Party imports
from rest_framework import serializers
# Module imports
from .base import BaseSerializer, DynamicBaseSerializer
from .user import UserLiteSerializer
from .state import StateLiteSerializer
from .project import ProjectLiteSerializer
from .workspace import WorkspaceLiteSerializer
from plane.db.models import (
User,
Issue,
IssueActivity,
IssueComment,
IssueProperty,
IssueAssignee,
IssueSubscriber,
IssueLabel,
Label,
CycleIssue,
Cycle,
Module,
ModuleIssue,
IssueLink,
IssueAttachment,
IssueReaction,
CommentReaction,
IssueVote,
IssueRelation,
State,
)
class IssueFlatSerializer(BaseSerializer):
## Contain only flat fields
class Meta:
model = Issue
fields = [
"id",
"name",
"description",
"description_html",
"priority",
"start_date",
"target_date",
"sequence_id",
"sort_order",
"is_draft",
]
class IssueProjectLiteSerializer(BaseSerializer):
project_detail = ProjectLiteSerializer(source="project", read_only=True)
class Meta:
model = Issue
fields = [
"id",
"project_detail",
"name",
"sequence_id",
]
read_only_fields = fields
##TODO: Find a better way to write this serializer
## Find a better approach to save manytomany?
class IssueCreateSerializer(BaseSerializer):
# ids
state_id = serializers.PrimaryKeyRelatedField(
source="state",
queryset=State.objects.all(),
required=False,
allow_null=True,
)
parent_id = serializers.PrimaryKeyRelatedField(
source="parent",
queryset=Issue.objects.all(),
required=False,
allow_null=True,
)
label_ids = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(queryset=Label.objects.all()),
write_only=True,
required=False,
)
assignee_ids = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(queryset=User.objects.all()),
write_only=True,
required=False,
)
class Meta:
model = Issue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
def to_representation(self, instance):
data = super().to_representation(instance)
assignee_ids = self.initial_data.get("assignee_ids")
data["assignee_ids"] = assignee_ids if assignee_ids else []
label_ids = self.initial_data.get("label_ids")
data["label_ids"] = label_ids if label_ids else []
return data
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("target_date", None) is not None
and data.get("start_date", None) > data.get("target_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed target date"
)
return data
def create(self, validated_data):
assignees = validated_data.pop("assignee_ids", None)
labels = validated_data.pop("label_ids", None)
project_id = self.context["project_id"]
workspace_id = self.context["workspace_id"]
default_assignee_id = self.context["default_assignee_id"]
issue = Issue.objects.create(**validated_data, project_id=project_id)
# Issue Audit Users
created_by_id = issue.created_by_id
updated_by_id = issue.updated_by_id
if assignees is not None and len(assignees):
IssueAssignee.objects.bulk_create(
[
IssueAssignee(
assignee=user,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for user in assignees
],
batch_size=10,
)
else:
# Then assign it to default assignee
if default_assignee_id is not None:
IssueAssignee.objects.create(
assignee_id=default_assignee_id,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
if labels is not None and len(labels):
IssueLabel.objects.bulk_create(
[
IssueLabel(
label=label,
issue=issue,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for label in labels
],
batch_size=10,
)
return issue
def update(self, instance, validated_data):
assignees = validated_data.pop("assignee_ids", None)
labels = validated_data.pop("label_ids", None)
# Related models
project_id = instance.project_id
workspace_id = instance.workspace_id
created_by_id = instance.created_by_id
updated_by_id = instance.updated_by_id
if assignees is not None:
IssueAssignee.objects.filter(issue=instance).delete()
IssueAssignee.objects.bulk_create(
[
IssueAssignee(
assignee=user,
issue=instance,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for user in assignees
],
batch_size=10,
)
if labels is not None:
IssueLabel.objects.filter(issue=instance).delete()
IssueLabel.objects.bulk_create(
[
IssueLabel(
label=label,
issue=instance,
project_id=project_id,
workspace_id=workspace_id,
created_by_id=created_by_id,
updated_by_id=updated_by_id,
)
for label in labels
],
batch_size=10,
)
        # Update the timestamp even when only related models change
instance.updated_at = timezone.now()
return super().update(instance, validated_data)
class IssueActivitySerializer(BaseSerializer):
actor_detail = UserLiteSerializer(read_only=True, source="actor")
issue_detail = IssueFlatSerializer(read_only=True, source="issue")
project_detail = ProjectLiteSerializer(read_only=True, source="project")
workspace_detail = WorkspaceLiteSerializer(
read_only=True, source="workspace"
)
class Meta:
model = IssueActivity
fields = "__all__"
class IssuePropertySerializer(BaseSerializer):
class Meta:
model = IssueProperty
fields = "__all__"
read_only_fields = [
"user",
"workspace",
"project",
]
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = [
"parent",
"name",
"color",
"id",
"project_id",
"workspace_id",
"sort_order",
]
read_only_fields = [
"workspace",
"project",
]
class LabelLiteSerializer(BaseSerializer):
class Meta:
model = Label
fields = [
"id",
"name",
"color",
]
class IssueLabelSerializer(BaseSerializer):
class Meta:
model = IssueLabel
fields = "__all__"
read_only_fields = [
"workspace",
"project",
]
class IssueRelationSerializer(BaseSerializer):
id = serializers.UUIDField(source="related_issue.id", read_only=True)
project_id = serializers.PrimaryKeyRelatedField(
source="related_issue.project_id", read_only=True
)
sequence_id = serializers.IntegerField(
source="related_issue.sequence_id", read_only=True
)
name = serializers.CharField(source="related_issue.name", read_only=True)
relation_type = serializers.CharField(read_only=True)
class Meta:
model = IssueRelation
fields = [
"id",
"project_id",
"sequence_id",
"relation_type",
"name",
]
read_only_fields = [
"workspace",
"project",
]
class RelatedIssueSerializer(BaseSerializer):
id = serializers.UUIDField(source="issue.id", read_only=True)
project_id = serializers.PrimaryKeyRelatedField(
source="issue.project_id", read_only=True
)
sequence_id = serializers.IntegerField(
source="issue.sequence_id", read_only=True
)
name = serializers.CharField(source="issue.name", read_only=True)
relation_type = serializers.CharField(read_only=True)
class Meta:
model = IssueRelation
fields = [
"id",
"project_id",
"sequence_id",
"relation_type",
"name",
]
read_only_fields = [
"workspace",
"project",
]
class IssueAssigneeSerializer(BaseSerializer):
assignee_details = UserLiteSerializer(read_only=True, source="assignee")
class Meta:
model = IssueAssignee
fields = "__all__"
class CycleBaseSerializer(BaseSerializer):
class Meta:
model = Cycle
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueCycleDetailSerializer(BaseSerializer):
cycle_detail = CycleBaseSerializer(read_only=True, source="cycle")
class Meta:
model = CycleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class ModuleBaseSerializer(BaseSerializer):
class Meta:
model = Module
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueModuleDetailSerializer(BaseSerializer):
module_detail = ModuleBaseSerializer(read_only=True, source="module")
class Meta:
model = ModuleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueLinkSerializer(BaseSerializer):
created_by_detail = UserLiteSerializer(read_only=True, source="created_by")
class Meta:
model = IssueLink
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"issue",
]
def validate_url(self, value):
# Check URL format
validate_url = URLValidator()
try:
validate_url(value)
except ValidationError:
raise serializers.ValidationError("Invalid URL format.")
# Check URL scheme
if not value.startswith(('http://', 'https://')):
raise serializers.ValidationError("Invalid URL scheme.")
return value
    # Validate that the URL does not already exist for this issue
def create(self, validated_data):
if IssueLink.objects.filter(
url=validated_data.get("url"),
issue_id=validated_data.get("issue_id"),
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return IssueLink.objects.create(**validated_data)
def update(self, instance, validated_data):
if IssueLink.objects.filter(
url=validated_data.get("url"),
issue_id=instance.issue_id,
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return super().update(instance, validated_data)
class IssueLinkLiteSerializer(BaseSerializer):
class Meta:
model = IssueLink
fields = [
"id",
"issue_id",
"title",
"url",
"metadata",
"created_by_id",
"created_at",
]
read_only_fields = fields
class IssueAttachmentSerializer(BaseSerializer):
class Meta:
model = IssueAttachment
fields = "__all__"
read_only_fields = [
"created_by",
"updated_by",
"created_at",
"updated_at",
"workspace",
"project",
"issue",
]
class IssueAttachmentLiteSerializer(DynamicBaseSerializer):
class Meta:
model = IssueAttachment
fields = [
"id",
"asset",
"attributes",
"issue_id",
"updated_at",
"updated_by_id",
]
read_only_fields = fields
class IssueReactionSerializer(BaseSerializer):
actor_detail = UserLiteSerializer(read_only=True, source="actor")
class Meta:
model = IssueReaction
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"issue",
"actor",
]
class IssueReactionLiteSerializer(DynamicBaseSerializer):
class Meta:
model = IssueReaction
fields = [
"id",
"actor",
"issue",
"reaction",
]
class CommentReactionSerializer(BaseSerializer):
class Meta:
model = CommentReaction
fields = "__all__"
read_only_fields = ["workspace", "project", "comment", "actor"]
class IssueVoteSerializer(BaseSerializer):
actor_detail = UserLiteSerializer(read_only=True, source="actor")
class Meta:
model = IssueVote
fields = [
"issue",
"vote",
"workspace",
"project",
"actor",
"actor_detail",
]
read_only_fields = fields
class IssueCommentSerializer(BaseSerializer):
actor_detail = UserLiteSerializer(read_only=True, source="actor")
issue_detail = IssueFlatSerializer(read_only=True, source="issue")
project_detail = ProjectLiteSerializer(read_only=True, source="project")
workspace_detail = WorkspaceLiteSerializer(
read_only=True, source="workspace"
)
comment_reactions = CommentReactionSerializer(read_only=True, many=True)
is_member = serializers.BooleanField(read_only=True)
class Meta:
model = IssueComment
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"issue",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class IssueStateFlatSerializer(BaseSerializer):
state_detail = StateLiteSerializer(read_only=True, source="state")
project_detail = ProjectLiteSerializer(read_only=True, source="project")
class Meta:
model = Issue
fields = [
"id",
"sequence_id",
"name",
"state_detail",
"project_detail",
]
# Issue Serializer with state details
class IssueStateSerializer(DynamicBaseSerializer):
label_details = LabelLiteSerializer(
read_only=True, source="labels", many=True
)
state_detail = StateLiteSerializer(read_only=True, source="state")
project_detail = ProjectLiteSerializer(read_only=True, source="project")
assignee_details = UserLiteSerializer(
read_only=True, source="assignees", many=True
)
sub_issues_count = serializers.IntegerField(read_only=True)
attachment_count = serializers.IntegerField(read_only=True)
link_count = serializers.IntegerField(read_only=True)
class Meta:
model = Issue
fields = "__all__"
class IssueInboxSerializer(DynamicBaseSerializer):
label_ids = serializers.ListField(
child=serializers.UUIDField(),
required=False,
)
class Meta:
model = Issue
fields = [
"id",
"name",
"priority",
"sequence_id",
"project_id",
"created_at",
"label_ids",
]
read_only_fields = fields
class IssueSerializer(DynamicBaseSerializer):
# ids
cycle_id = serializers.PrimaryKeyRelatedField(read_only=True)
module_ids = serializers.ListField(
child=serializers.UUIDField(),
required=False,
)
# Many to many
label_ids = serializers.ListField(
child=serializers.UUIDField(),
required=False,
)
assignee_ids = serializers.ListField(
child=serializers.UUIDField(),
required=False,
)
# Count items
sub_issues_count = serializers.IntegerField(read_only=True)
attachment_count = serializers.IntegerField(read_only=True)
link_count = serializers.IntegerField(read_only=True)
class Meta:
model = Issue
fields = [
"id",
"name",
"state_id",
"sort_order",
"completed_at",
"estimate_point",
"priority",
"start_date",
"target_date",
"sequence_id",
"project_id",
"parent_id",
"cycle_id",
"module_ids",
"label_ids",
"assignee_ids",
"sub_issues_count",
"created_at",
"updated_at",
"created_by",
"updated_by",
"attachment_count",
"link_count",
"is_draft",
"archived_at",
]
read_only_fields = fields
class IssueLiteSerializer(DynamicBaseSerializer):
class Meta:
model = Issue
fields = [
"id",
"sequence_id",
"project_id",
]
read_only_fields = fields
class IssueDetailSerializer(IssueSerializer):
description_html = serializers.CharField()
is_subscribed = serializers.BooleanField(read_only=True)
class Meta(IssueSerializer.Meta):
fields = IssueSerializer.Meta.fields + [
"description_html",
"is_subscribed",
]
read_only_fields = fields
class IssuePublicSerializer(BaseSerializer):
project_detail = ProjectLiteSerializer(read_only=True, source="project")
state_detail = StateLiteSerializer(read_only=True, source="state")
reactions = IssueReactionSerializer(
read_only=True, many=True, source="issue_reactions"
)
votes = IssueVoteSerializer(read_only=True, many=True)
class Meta:
model = Issue
fields = [
"id",
"name",
"description_html",
"sequence_id",
"state",
"state_detail",
"project",
"project_detail",
"workspace",
"priority",
"target_date",
"reactions",
"votes",
]
read_only_fields = fields
class IssueSubscriberSerializer(BaseSerializer):
class Meta:
model = IssueSubscriber
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"issue",
]
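Because IssueCreateSerializer.create reads project_id, workspace_id and default_assignee_id from the serializer context rather than from the payload, callers have to supply them explicitly. A hedged sketch of that call pattern follows; the project lookup and field values are placeholders.
# Sketch: the three context keys mirror what create() reads above.
from plane.app.serializers import IssueCreateSerializer
from plane.db.models import Project

project = Project.objects.first()  # placeholder project

serializer = IssueCreateSerializer(
    data={
        "name": "Fix login redirect",
        "priority": "high",
        "assignee_ids": [],  # empty list falls back to the default assignee
        "label_ids": [],
    },
    context={
        "project_id": project.id,
        "workspace_id": project.workspace_id,
        "default_assignee_id": project.default_assignee_id,  # assumed field on Project
    },
)
if serializer.is_valid():
    issue = serializer.save()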

View File

@@ -0,0 +1,242 @@
# Third Party imports
from rest_framework import serializers
# Module imports
from .base import BaseSerializer, DynamicBaseSerializer
from .project import ProjectLiteSerializer
from plane.db.models import (
User,
Module,
ModuleMember,
ModuleIssue,
ModuleLink,
ModuleFavorite,
ModuleUserProperties,
)
class ModuleWriteSerializer(BaseSerializer):
lead_id = serializers.PrimaryKeyRelatedField(
source="lead",
queryset=User.objects.all(),
required=False,
allow_null=True,
)
member_ids = serializers.ListField(
child=serializers.PrimaryKeyRelatedField(queryset=User.objects.all()),
write_only=True,
required=False,
)
class Meta:
model = Module
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"archived_at",
]
def to_representation(self, instance):
data = super().to_representation(instance)
data["member_ids"] = [
str(member.id) for member in instance.members.all()
]
return data
def validate(self, data):
if (
data.get("start_date", None) is not None
and data.get("target_date", None) is not None
and data.get("start_date", None) > data.get("target_date", None)
):
raise serializers.ValidationError(
"Start date cannot exceed target date"
)
return data
def create(self, validated_data):
members = validated_data.pop("member_ids", None)
project = self.context["project"]
module = Module.objects.create(**validated_data, project=project)
if members is not None:
ModuleMember.objects.bulk_create(
[
ModuleMember(
module=module,
member=member,
project=project,
workspace=project.workspace,
created_by=module.created_by,
updated_by=module.updated_by,
)
for member in members
],
batch_size=10,
ignore_conflicts=True,
)
return module
def update(self, instance, validated_data):
members = validated_data.pop("member_ids", None)
if members is not None:
ModuleMember.objects.filter(module=instance).delete()
ModuleMember.objects.bulk_create(
[
ModuleMember(
module=instance,
member=member,
project=instance.project,
workspace=instance.project.workspace,
created_by=instance.created_by,
updated_by=instance.updated_by,
)
for member in members
],
batch_size=10,
ignore_conflicts=True,
)
return super().update(instance, validated_data)
class ModuleFlatSerializer(BaseSerializer):
class Meta:
model = Module
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
]
class ModuleIssueSerializer(BaseSerializer):
module_detail = ModuleFlatSerializer(read_only=True, source="module")
issue_detail = ProjectLiteSerializer(read_only=True, source="issue")
sub_issues_count = serializers.IntegerField(read_only=True)
class Meta:
model = ModuleIssue
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"module",
]
class ModuleLinkSerializer(BaseSerializer):
class Meta:
model = ModuleLink
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"created_by",
"updated_by",
"created_at",
"updated_at",
"module",
]
    # Validate that the URL does not already exist for this module
def create(self, validated_data):
if ModuleLink.objects.filter(
url=validated_data.get("url"),
module_id=validated_data.get("module_id"),
).exists():
raise serializers.ValidationError(
{"error": "URL already exists for this Issue"}
)
return ModuleLink.objects.create(**validated_data)
class ModuleSerializer(DynamicBaseSerializer):
member_ids = serializers.ListField(
child=serializers.UUIDField(), required=False, allow_null=True
)
is_favorite = serializers.BooleanField(read_only=True)
total_issues = serializers.IntegerField(read_only=True)
cancelled_issues = serializers.IntegerField(read_only=True)
completed_issues = serializers.IntegerField(read_only=True)
started_issues = serializers.IntegerField(read_only=True)
unstarted_issues = serializers.IntegerField(read_only=True)
backlog_issues = serializers.IntegerField(read_only=True)
class Meta:
model = Module
fields = [
# Required fields
"id",
"workspace_id",
"project_id",
# Model fields
"name",
"description",
"description_text",
"description_html",
"start_date",
"target_date",
"status",
"lead_id",
"member_ids",
"view_props",
"sort_order",
"external_source",
"external_id",
# computed fields
"is_favorite",
"total_issues",
"cancelled_issues",
"completed_issues",
"started_issues",
"unstarted_issues",
"backlog_issues",
"created_at",
"updated_at",
]
read_only_fields = fields
class ModuleDetailSerializer(ModuleSerializer):
link_module = ModuleLinkSerializer(read_only=True, many=True)
sub_issues = serializers.IntegerField(read_only=True)
class Meta(ModuleSerializer.Meta):
fields = ModuleSerializer.Meta.fields + ["link_module", "sub_issues"]
class ModuleFavoriteSerializer(BaseSerializer):
module_detail = ModuleFlatSerializer(source="module", read_only=True)
class Meta:
model = ModuleFavorite
fields = "__all__"
read_only_fields = [
"workspace",
"project",
"user",
]
class ModuleUserPropertiesSerializer(BaseSerializer):
class Meta:
model = ModuleUserProperties
fields = "__all__"
read_only_fields = ["workspace", "project", "module", "user"]

Some files were not shown because too many files have changed in this diff.