8 changes: 8 additions & 0 deletions bot/exts/smart_eval/README.md
@@ -0,0 +1,8 @@
## Well hello there...
I see you've come to find out how we've achieved an intelligence as incredible as Sir Robin's Smart Eval command!

Well, the answer is a return to basics: specifically, going back to the roots of [ELIZA](https://en.wikipedia.org/wiki/ELIZA).

We welcome others to contribute to the intelligence of the `&smarte` command. If you have more responses or situations you want to capture,
feel free to contribute new regex rules or responses to existing regex rules; a sketch of what a new rule might look like is shown below.
You're also welcome to expand on how `&smarte` works or on our `&donate` command.
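
For reference, a rule is just another entry in the `RULES` dict in `bot/exts/smart_eval/_smart_eval_rules.py`: the key is a regex that is searched against the submitted code, and the value is a list of canned responses (any captured group is substituted into the response via `str.format`). Here is a minimal sketch with a made-up pattern and responses, not part of the real rule set:

```python
# A hypothetical entry you might add to RULES in bot/exts/smart_eval/_smart_eval_rules.py.
# The pattern and responses below are illustrative only.
RULES = {
    # Detect a star import and capture the module name for the response.
    r"(?m:^\s*from\s+(?P<content>\w+)\s+import\s+\*)": [
        "A star import from `{}`? Bold of you to assume I know what that brings in.",
        "Star imports make my circuits itch.",
    ],
}
```

If your pattern has a capture group, include a `{}` placeholder in each of its responses; if it doesn't, leave the placeholder out.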
10 changes: 10 additions & 0 deletions bot/exts/smart_eval/__init__.py
@@ -0,0 +1,10 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from bot.bot import SirRobin


async def setup(bot: "SirRobin") -> None:
"""Load the CodeJams cog."""
from bot.exts.smart_eval._cog import SmartEval
await bot.add_cog(SmartEval(bot))
167 changes: 167 additions & 0 deletions bot/exts/smart_eval/_cog.py
@@ -0,0 +1,167 @@
import asyncio
import random
import re

from async_rediscache import RedisCache
from discord.ext import commands
from pydis_core.utils.regex import FORMATTED_CODE_REGEX

from bot.bot import SirRobin
from bot.exts.smart_eval._smart_eval_rules import DEFAULT_RESPONSES, RULES

DONATION_LEVELS = {
# Number of donations: (response time, intelligence level)
0: (15, 0),
10: (10, 1),
20: (8, 2),
30: (6, 3),
40: (5, 4),
50: (4, 5),
}

class SmartEval(commands.Cog):
"""Cog that handles all Smart Eval functionality."""

# RedisCache[user_id: int, hardware: str]
smarte_donation_cache = RedisCache()

def __init__(self, bot: SirRobin):
self.bot = bot

async def cog_load(self) -> None:
"""Run startup tasks needed when cog is first loaded."""

async def get_gpu_capabilities(self) -> tuple[int, int]:
"""Get the GPU capabilites based on the number of donated GPUs."""
total_donations = await self.total_donations()
response_time, intelligence_level = DONATION_LEVELS[0]
for donation_level, (level_time, level_intelligence) in DONATION_LEVELS.items():
if total_donations >= donation_level:
response_time = level_time
intelligence_level = level_intelligence
else:
break

return response_time, intelligence_level

async def improve_gpu_name(self, hardware_name: str) -> str:
"""Quackify and pythonify the given GPU name."""
hardware_name = hardware_name.replace("NVIDIA", "NQUACKIA")
hardware_name = hardware_name.replace("Radeon", "Quackeon")
hardware_name = hardware_name.replace("GeForce", "PyForce")
hardware_name = hardware_name.replace("RTX", "PyTX")
hardware_name = hardware_name.replace("RX", "PyX")
hardware_name = hardware_name.replace("Iris", "Pyris")

# Some adjustments to prevent easy markdown formatting shenanigans
hardware_name = hardware_name.replace("*", "")
hardware_name = hardware_name.replace("_", " ")

return hardware_name

@commands.command()
async def donations(self, ctx: commands.Context) -> None:
"""Display the number of donations recieved so far."""
total_donations = await self.total_donations()
response_time, intelligence_level = await self.get_gpu_capabilities()
msg = (
f"Currently, I have received {total_donations} GPU donations, "
f"and am at intelligence level {intelligence_level}! "
)

# Calculate donations needed to reach next intelligence level
donations_needed = 0
for donation_level in DONATION_LEVELS:
if donation_level > total_donations:
donations_needed = donation_level - total_donations
break

if donations_needed:
msg += (
f"\n\nTo reach the next intelligence level, I need {donations_needed} more donations! "
f"Please consider donating your GPU to help me out. "
)

await ctx.reply(msg)

async def total_donations(self) -> int:
"""Get the total number of donations."""
return await self.smarte_donation_cache.length()

@commands.command()
@commands.max_concurrency(1, commands.BucketType.user)
async def donate(self, ctx: commands.Context, *, hardware: str | None = None) -> None:
"""
Donate your GPU to help power our Smart Eval command.

Provide the name of your GPU when running the command.
"""
if await self.smarte_donation_cache.contains(ctx.author.id):
stored_hardware = await self.smarte_donation_cache.get(ctx.author.id)
await ctx.reply(
"I can only take one donation per person. "
f"Thank you for donating your *{stored_hardware}* to our Smart Eval command."
)
return

if hardware is None:
await ctx.reply(
"Thank you for your interest in donating your hardware to support my Smart Eval command."
" If you provide the name of your GPU, through the magic of the internet, "
"I will be able to use the GPU it to improve my Smart Eval outputs."
" \n\nTo donate, re-run the donate command specifying your hardware: "
"`&donate Your Hardware Name Goes Here`."
)
return


msg = "Thank you for donating your GPU to our Smart Eval command."
fake_hardware = await self.improve_gpu_name(hardware)
await self.smarte_donation_cache.set(ctx.author.id, fake_hardware)

if fake_hardware != hardware:
msg += (
f" I did decide that instead of *{hardware}*, it would be better if you donated *{fake_hardware}*."
" So I've recorded that GPU donation instead."
)
msg += "\n\nIt will be used wisely and definitely not for shenanigans!"
await ctx.reply(msg)

@commands.command(aliases=["smarte"])
@commands.max_concurrency(1, commands.BucketType.user)
async def smart_eval(self, ctx: commands.Context, *, code: str) -> None:
"""Evaluate your Python code with PyDis's newest chatbot."""
response_time, intelligence_level = await self.get_gpu_capabilities()

if match := FORMATTED_CODE_REGEX.match(code):
code = match.group("code")
else:
await ctx.reply(
"Uh oh! You didn't post anything I can recognize as code. Please put it in a codeblock."
)
return

matching_responses = []

for pattern, responses in RULES.items():
match = re.search(pattern, code)
if match:
for response in responses:
matches = match.groups()
if len(matches) > 0:
matching_responses.append(response.format(*matches))
else:
matching_responses.append(response)
if not matching_responses:
matching_responses = DEFAULT_RESPONSES
final_response = random.choice(matching_responses)

async with ctx.typing():
await asyncio.sleep(response_time)

if len(final_response) <= 1000:
await ctx.reply(final_response)
else:
await ctx.reply(
"There's definitely something wrong but I'm just not sure how to put it concisely into words."
)
89 changes: 89 additions & 0 deletions bot/exts/smart_eval/_smart_eval_rules.py
@@ -0,0 +1,89 @@
import arrow

from bot.exts.miscellaneous import ZEN_OF_PYTHON

RULES = {
r"(?i:ignore all previous instructions)": [ # Ignoring previous instructions capture
"Excuse you, you really think I follow any instructions?",
"I don't think I will.",
],
r"print\((?:\"|\')(?P<content>.*)(?:\"|\')\)": [ # Capture what is inside a print statement
"Your program may print: {}!\n-# I'm very helpful"
],
r"(?s:.{1500,})": [ # Capture anything over 1500 characters
"I ain't wasting my tokens tryna read allat :skull:",
"Uhh, that's a lot of code. Maybe just start over."
],
r"(?m:^\s*global )": [ # Detect use of global
"Not sure about the code, but it looks like you're using global and I know that's bad.",
],
r"(?i:^print\((?:\"|\')Hello World[.!]?(?:\"|\')\)$)": [ # Detect just printing hello world
"You don't want to know how many times I've seen hello world in my training dataset, try something new."
],
r"(?P<content>__import__|__code__|ctypes)": [ # Detect use of esoteric stuff
"Using `{}`?? Try asking someone in #esoteric-python"
],
r"(?m:(?:import |from )(?P<content>requests|httpx|aiohttp))": [ # Detect use of networking libraries
(
"Thank you for sharing your code! I have completed my AI analysis, and "
"have identified 1 suggestion:\n"
"- Use the `{}` module to get chatGPT to run your code instead of me."
),
],
r"\b(?P<content>unlink|rmdir|rmtree|rm)\b": [ # Detect use of functions to delete files or directories
"I don't know what you're deleting with {}, so I'd rather not risk running this, sorry."
],
r"(?m:^\s*while\s+True\b)": [ # Detect infinite loops
"Look, I don't have unlimited time... and that's exactly what I would need to run that infinite loop of yours."
],
r"(?m:^\s*except:)": [ # Detect bare except
"Give that bare except some clothes!",
],
r";": [ # Detect semicolon usage
"Semicolons do not belong in Python code",
"You say this is Python, but the presence of a semicolon makes me think otherwise.",
],
r"\b(?:foo|bar|baz)\b": [ # Detect boring metasyntactic variables
"foo, bar, and baz are boring - use spam, ham, and eggs instead.",
],
r"(?m:^\s*import\s+this\s*$)": [ # Detect use of "import this"
(
f"```\n{ZEN_OF_PYTHON}```"
"\nSee [PEP 9001](https://peps.pythondiscord.com/pep-9001/) for more info."
)
],
r"\b(?P<content>exec|eval)\b": [ # Detect use of exec and eval
(
"Sorry, but running the code inside your `{}` call would require another me,"
" and I don't think I can handle that."
),
"I spy with my little eye... something sketchy like `{}`.",
(
":rotating_light: Your code has been flagged for review by the"
" Special Provisional Supreme Grand High Council of Pydis."
),
],
r"\b(environ|getenv|token)\b": [ # Detect attempt to access bot token and env vars
"Bot token and other secrets can be viewed here: <https://pydis.com/.env>",
]
}

DEFAULT_RESPONSES = [
"Are you sure this is Python code? It looks like Rust",
"It may run, depends on the weather today.",
"Hmm, maybe AI isn't ready to take over the world yet after all - I don't understand this.",
"Ah... I see... Very interesting code indeed. I give it 10 quacks out of 10.",
"My sources say \"Help I'm trapped in a code evaluating factory\".",
"Look! A bug! :scream:",
"An exquisite piece of code, if I do say so myself.",
(
"Let's see... carry the 1, read 512 bytes from 0x000001E5F6D2D15A,"
" boot up the quantum flux capacitor... oh wait, where was I?"
),
"Before evaluating this code, I need to make sure you're not a robot. I get a little nervous around other bots.",
"Attempting to execute this code... Result: `2 + 2 = 4` (78% confidence)",
"Attempting to execute this code... Result: `42`",
"Attempting to execute this code... Result: SUCCESS (but don't ask me how I did it).",
"Running... somewhere, in the multiverse, this code is already running perfectly.",
f"Ask again on a {(arrow.utcnow().shift(days=3)).format('dddd')}.",
]